Source file src/runtime/proc.go

Documentation: runtime

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/cpu"
     9  	"runtime/internal/atomic"
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
    14  var buildVersion = sys.TheVersion
    15  
    16  // set using cmd/go/internal/modload.ModInfoProg
    17  var modinfo string
    18  
    19  // Goroutine scheduler
    20  // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    21  //
    22  // The main concepts are:
    23  // G - goroutine.
    24  // M - worker thread, or machine.
    25  // P - processor, a resource that is required to execute Go code.
    26  //     M must have an associated P to execute Go code, however it can be
    27  //     blocked or in a syscall w/o an associated P.
    28  //
    29  // Design doc at https://golang.org/s/go11sched.
    30  
    31  // Worker thread parking/unparking.
    32  // We need to balance between keeping enough running worker threads to utilize
    33  // available hardware parallelism and parking excessive running worker threads
    34  // to conserve CPU resources and power. This is not simple for two reasons:
    35  // (1) scheduler state is intentionally distributed (in particular, per-P work
    36  // queues), so it is not possible to compute global predicates on fast paths;
    37  // (2) for optimal thread management we would need to know the future (don't park
    38  // a worker thread when a new goroutine will be readied in near future).
    39  //
    40  // Three rejected approaches that would work badly:
    41  // 1. Centralize all scheduler state (would inhibit scalability).
    42  // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
    43  //    is a spare P, unpark a thread and handoff it the thread and the goroutine.
    44  //    This would lead to thread state thrashing, as the thread that readied the
    45  //    goroutine can be out of work the very next moment, we will need to park it.
    46  //    Also, it would destroy locality of computation as we want to preserve
    47  //    dependent goroutines on the same thread; and introduce additional latency.
    48  // 3. Unpark an additional thread whenever we ready a goroutine and there is an
    49  //    idle P, but don't do handoff. This would lead to excessive thread parking/
    50  //    unparking as the additional threads will instantly park without discovering
    51  //    any work to do.
    52  //
    53  // The current approach:
    54  // We unpark an additional thread when we ready a goroutine if (1) there is an
    55  // idle P and there are no "spinning" worker threads. A worker thread is considered
    56  // spinning if it is out of local work and did not find work in global run queue/
    57  // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
    58  // Threads unparked this way are also considered spinning; we don't do goroutine
    59  // handoff so such threads are out of work initially. Spinning threads do some
    60  // spinning looking for work in per-P run queues before parking. If a spinning
    61  // thread finds work it takes itself out of the spinning state and proceeds to
    62  // execution. If it does not find work it takes itself out of the spinning state
    63  // and then parks.
    64  // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark
    65  // new threads when readying goroutines. To compensate for that, if the last spinning
    66  // thread finds work and stops spinning, it must unpark a new spinning thread.
    67  // This approach smooths out unjustified spikes of thread unparking,
    68  // but at the same time guarantees eventual maximal CPU parallelism utilization.
    69  //
    70  // The main implementation complication is that we need to be very careful during
    71  // spinning->non-spinning thread transition. This transition can race with submission
    72  // of a new goroutine, and either one part or another needs to unpark another worker
    73  // thread. If they both fail to do that, we can end up with semi-persistent CPU
    74  // underutilization. The general pattern for goroutine readying is: submit a goroutine
    75  // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
    76  // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
    77  // #StoreLoad-style memory barrier, check all per-P work queues for new work.
    78  // Note that all this complexity does not apply to global run queue as we are not
    79  // sloppy about thread unparking when submitting to global queue. Also see comments
    80  // for nmspinning manipulation.
    81  
    82  var (
    83  	m0           m
    84  	g0           g
    85  	raceprocctx0 uintptr
    86  )
    87  
    88  //go:linkname runtime_inittask runtime..inittask
    89  var runtime_inittask initTask
    90  
    91  //go:linkname main_inittask main..inittask
    92  var main_inittask initTask
    93  
    94  // main_init_done is a signal used by cgocallbackg that initialization
    95  // has been completed. It is made before _cgo_notify_runtime_init_done,
    96  // so all cgo calls can rely on it existing. When main_init is complete,
    97  // it is closed, meaning cgocallbackg can reliably receive from it.
    98  var main_init_done chan bool
    99  
   100  //go:linkname main_main main.main
   101  func main_main()
   102  
   103  // mainStarted indicates that the main M has started.
   104  var mainStarted bool
   105  
   106  // runtimeInitTime is the nanotime() at which the runtime started.
   107  var runtimeInitTime int64
   108  
   109  // Value to use for signal mask for newly created M's.
   110  var initSigmask sigset
   111  
   112  // The main goroutine.
   113  func main() {
   114  	g := getg()
   115  
   116  	// Racectx of m0->g0 is used only as the parent of the main goroutine.
   117  	// It must not be used for anything else.
   118  	g.m.g0.racectx = 0
   119  
   120  	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
   121  	// Using decimal instead of binary GB and MB because
   122  	// they look nicer in the stack overflow failure message.
   123  	if sys.PtrSize == 8 {
   124  		maxstacksize = 1000000000
   125  	} else {
   126  		maxstacksize = 250000000
   127  	}
   128  
   129  	// Allow newproc to start new Ms.
   130  	mainStarted = true
   131  
   132  	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
   133  		systemstack(func() {
   134  			newm(sysmon, nil)
   135  		})
   136  	}
   137  
   138  	// Lock the main goroutine onto this, the main OS thread,
   139  	// during initialization. Most programs won't care, but a few
   140  	// do require certain calls to be made by the main thread.
   141  	// Those can arrange for main.main to run in the main thread
   142  	// by calling runtime.LockOSThread during initialization
   143  	// to preserve the lock.
   144  	lockOSThread()
   145  
   146  	if g.m != &m0 {
   147  		throw("runtime.main not on m0")
   148  	}
   149  
   150  	doInit(&runtime_inittask) // must be before defer
   151  	if nanotime() == 0 {
   152  		throw("nanotime returning zero")
   153  	}
   154  
   155  	// Defer unlock so that runtime.Goexit during init does the unlock too.
   156  	needUnlock := true
   157  	defer func() {
   158  		if needUnlock {
   159  			unlockOSThread()
   160  		}
   161  	}()
   162  
   163  	// Record when the world started.
   164  	runtimeInitTime = nanotime()
   165  
   166  	gcenable()
   167  
   168  	main_init_done = make(chan bool)
   169  	if iscgo {
   170  		if _cgo_thread_start == nil {
   171  			throw("_cgo_thread_start missing")
   172  		}
   173  		if GOOS != "windows" {
   174  			if _cgo_setenv == nil {
   175  				throw("_cgo_setenv missing")
   176  			}
   177  			if _cgo_unsetenv == nil {
   178  				throw("_cgo_unsetenv missing")
   179  			}
   180  		}
   181  		if _cgo_notify_runtime_init_done == nil {
   182  			throw("_cgo_notify_runtime_init_done missing")
   183  		}
   184  		// Start the template thread in case we enter Go from
   185  		// a C-created thread and need to create a new thread.
   186  		startTemplateThread()
   187  		cgocall(_cgo_notify_runtime_init_done, nil)
   188  	}
   189  
   190  	doInit(&main_inittask)
   191  
   192  	close(main_init_done)
   193  
   194  	needUnlock = false
   195  	unlockOSThread()
   196  
   197  	if isarchive || islibrary {
   198  		// A program compiled with -buildmode=c-archive or c-shared
   199  		// has a main, but it is not executed.
   200  		return
   201  	}
   202  	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
   203  	fn()
   204  	if raceenabled {
   205  		racefini()
   206  	}
   207  
   208  	// Make racy client program work: if panicking on
   209  	// another goroutine at the same time as main returns,
   210  	// let the other goroutine finish printing the panic trace.
   211  	// Once it does, it will exit. See issues 3934 and 20018.
   212  	if atomic.Load(&runningPanicDefers) != 0 {
   213  		// Running deferred functions should not take long.
   214  		for c := 0; c < 1000; c++ {
   215  			if atomic.Load(&runningPanicDefers) == 0 {
   216  				break
   217  			}
   218  			Gosched()
   219  		}
   220  	}
   221  	if atomic.Load(&panicking) != 0 {
   222  		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
   223  	}
   224  
   225  	exit(0)
   226  	for {
   227  		var x *int32
   228  		*x = 0
   229  	}
   230  }
   231  
   232  // os_beforeExit is called from os.Exit(0).
   233  //go:linkname os_beforeExit os.runtime_beforeExit
   234  func os_beforeExit() {
   235  	if raceenabled {
   236  		racefini()
   237  	}
   238  }
   239  
   240  // start forcegc helper goroutine
   241  func init() {
   242  	go forcegchelper()
   243  }
   244  
   245  func forcegchelper() {
   246  	forcegc.g = getg()
   247  	for {
   248  		lock(&forcegc.lock)
   249  		if forcegc.idle != 0 {
   250  			throw("forcegc: phase error")
   251  		}
   252  		atomic.Store(&forcegc.idle, 1)
   253  		goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
   254  		// this goroutine is explicitly resumed by sysmon
   255  		if debug.gctrace > 0 {
   256  			println("GC forced")
   257  		}
   258  		// Time-triggered, fully concurrent.
   259  		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
   260  	}
   261  }
   262  
   263  //go:nosplit
   264  
   265  // Gosched yields the processor, allowing other goroutines to run. It does not
   266  // suspend the current goroutine, so execution resumes automatically.
   267  func Gosched() {
   268  	checkTimeouts()
   269  	mcall(gosched_m)
   270  }
   271  
   272  // goschedguarded yields the processor like gosched, but also checks
   273  // for forbidden states and opts out of the yield in those cases.
   274  //go:nosplit
   275  func goschedguarded() {
   276  	mcall(goschedguarded_m)
   277  }
   278  
   279  // Puts the current goroutine into a waiting state and calls unlockf.
   280  // If unlockf returns false, the goroutine is resumed.
   281  // unlockf must not access this G's stack, as it may be moved between
   282  // the call to gopark and the call to unlockf.
   283  // Reason explains why the goroutine has been parked.
   284  // It is displayed in stack traces and heap dumps.
   285  // Reasons should be unique and descriptive.
   286  // Do not re-use reasons, add new ones.
   287  func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
   288  	if reason != waitReasonSleep {
   289  		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
   290  	}
   291  	mp := acquirem()
   292  	gp := mp.curg
   293  	status := readgstatus(gp)
   294  	if status != _Grunning && status != _Gscanrunning {
   295  		throw("gopark: bad g status")
   296  	}
   297  	mp.waitlock = lock
   298  	mp.waitunlockf = unlockf
   299  	gp.waitreason = reason
   300  	mp.waittraceev = traceEv
   301  	mp.waittraceskip = traceskip
   302  	releasem(mp)
   303  	// can't do anything that might move the G between Ms here.
   304  	mcall(park_m)
   305  }
   306  
   307  // Puts the current goroutine into a waiting state and unlocks the lock.
   308  // The goroutine can be made runnable again by calling goready(gp).
   309  func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
   310  	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
   311  }
   312  
   313  func goready(gp *g, traceskip int) {
   314  	systemstack(func() {
   315  		ready(gp, traceskip, true)
   316  	})
   317  }
   318  
   319  //go:nosplit
   320  func acquireSudog() *sudog {
   321  	// Delicate dance: the semaphore implementation calls
   322  	// acquireSudog, acquireSudog calls new(sudog),
   323  	// new calls malloc, malloc can call the garbage collector,
   324  	// and the garbage collector calls the semaphore implementation
   325  	// in stopTheWorld.
   326  	// Break the cycle by doing acquirem/releasem around new(sudog).
   327  	// The acquirem/releasem increments m.locks during new(sudog),
   328  	// which keeps the garbage collector from being invoked.
   329  	mp := acquirem()
   330  	pp := mp.p.ptr()
   331  	if len(pp.sudogcache) == 0 {
   332  		lock(&sched.sudoglock)
   333  		// First, try to grab a batch from central cache.
   334  		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
   335  			s := sched.sudogcache
   336  			sched.sudogcache = s.next
   337  			s.next = nil
   338  			pp.sudogcache = append(pp.sudogcache, s)
   339  		}
   340  		unlock(&sched.sudoglock)
   341  		// If the central cache is empty, allocate a new one.
   342  		if len(pp.sudogcache) == 0 {
   343  			pp.sudogcache = append(pp.sudogcache, new(sudog))
   344  		}
   345  	}
   346  	n := len(pp.sudogcache)
   347  	s := pp.sudogcache[n-1]
   348  	pp.sudogcache[n-1] = nil
   349  	pp.sudogcache = pp.sudogcache[:n-1]
   350  	if s.elem != nil {
   351  		throw("acquireSudog: found s.elem != nil in cache")
   352  	}
   353  	releasem(mp)
   354  	return s
   355  }
   356  
   357  //go:nosplit
   358  func releaseSudog(s *sudog) {
   359  	if s.elem != nil {
   360  		throw("runtime: sudog with non-nil elem")
   361  	}
   362  	if s.isSelect {
   363  		throw("runtime: sudog with non-false isSelect")
   364  	}
   365  	if s.next != nil {
   366  		throw("runtime: sudog with non-nil next")
   367  	}
   368  	if s.prev != nil {
   369  		throw("runtime: sudog with non-nil prev")
   370  	}
   371  	if s.waitlink != nil {
   372  		throw("runtime: sudog with non-nil waitlink")
   373  	}
   374  	if s.c != nil {
   375  		throw("runtime: sudog with non-nil c")
   376  	}
   377  	gp := getg()
   378  	if gp.param != nil {
   379  		throw("runtime: releaseSudog with non-nil gp.param")
   380  	}
   381  	mp := acquirem() // avoid rescheduling to another P
   382  	pp := mp.p.ptr()
   383  	if len(pp.sudogcache) == cap(pp.sudogcache) {
   384  		// Transfer half of local cache to the central cache.
   385  		var first, last *sudog
   386  		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
   387  			n := len(pp.sudogcache)
   388  			p := pp.sudogcache[n-1]
   389  			pp.sudogcache[n-1] = nil
   390  			pp.sudogcache = pp.sudogcache[:n-1]
   391  			if first == nil {
   392  				first = p
   393  			} else {
   394  				last.next = p
   395  			}
   396  			last = p
   397  		}
   398  		lock(&sched.sudoglock)
   399  		last.next = sched.sudogcache
   400  		sched.sudogcache = first
   401  		unlock(&sched.sudoglock)
   402  	}
   403  	pp.sudogcache = append(pp.sudogcache, s)
   404  	releasem(mp)
   405  }
   406  
   407  // funcPC returns the entry PC of the function f.
   408  // It assumes that f is a func value. Otherwise the behavior is undefined.
   409  // CAREFUL: In programs with plugins, funcPC can return different values
   410  // for the same function (because there are actually multiple copies of
   411  // the same function in the address space). To be safe, don't use the
   412  // results of this function in any == expression. It is only safe to
   413  // use the result as an address at which to start executing code.
   414  //go:nosplit
   415  func funcPC(f interface{}) uintptr {
   416  	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
   417  }
   418  
   419  // called from assembly
   420  func badmcall(fn func(*g)) {
   421  	throw("runtime: mcall called on m->g0 stack")
   422  }
   423  
   424  func badmcall2(fn func(*g)) {
   425  	throw("runtime: mcall function returned")
   426  }
   427  
   428  func badreflectcall() {
   429  	panic(plainError("arg size to reflect.call more than 1GB"))
   430  }
   431  
   432  var badmorestackg0Msg = "fatal: morestack on g0\n"
   433  
   434  //go:nosplit
   435  //go:nowritebarrierrec
   436  func badmorestackg0() {
   437  	sp := stringStructOf(&badmorestackg0Msg)
   438  	write(2, sp.str, int32(sp.len))
   439  }
   440  
   441  var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
   442  
   443  //go:nosplit
   444  //go:nowritebarrierrec
   445  func badmorestackgsignal() {
   446  	sp := stringStructOf(&badmorestackgsignalMsg)
   447  	write(2, sp.str, int32(sp.len))
   448  }
   449  
   450  //go:nosplit
   451  func badctxt() {
   452  	throw("ctxt != 0")
   453  }
   454  
   455  func lockedOSThread() bool {
   456  	gp := getg()
   457  	return gp.lockedm != 0 && gp.m.lockedg != 0
   458  }
   459  
   460  var (
   461  	allgs    []*g
   462  	allglock mutex
   463  )
   464  
   465  func allgadd(gp *g) {
   466  	if readgstatus(gp) == _Gidle {
   467  		throw("allgadd: bad status Gidle")
   468  	}
   469  
   470  	lock(&allglock)
   471  	allgs = append(allgs, gp)
   472  	allglen = uintptr(len(allgs))
   473  	unlock(&allglock)
   474  }
   475  
   476  const (
   477  	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
   478  	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
   479  	_GoidCacheBatch = 16
   480  )
   481  
   482  // cpuinit extracts the environment variable GODEBUG from the environment on
   483  // Unix-like operating systems and calls internal/cpu.Initialize.
   484  func cpuinit() {
   485  	const prefix = "GODEBUG="
   486  	var env string
   487  
   488  	switch GOOS {
   489  	case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
   490  		cpu.DebugOptions = true
   491  
   492  		// Similar to goenv_unix but extracts the environment value for
   493  		// GODEBUG directly.
   494  		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
   495  		n := int32(0)
   496  		for argv_index(argv, argc+1+n) != nil {
   497  			n++
   498  		}
   499  
   500  		for i := int32(0); i < n; i++ {
   501  			p := argv_index(argv, argc+1+i)
   502  			s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
   503  
   504  			if hasPrefix(s, prefix) {
   505  				env = gostring(p)[len(prefix):]
   506  				break
   507  			}
   508  		}
   509  	}
   510  
   511  	cpu.Initialize(env)
   512  
   513  	// Support cpu feature variables are used in code generated by the compiler
   514  	// to guard execution of instructions that can not be assumed to be always supported.
   515  	x86HasPOPCNT = cpu.X86.HasPOPCNT
   516  	x86HasSSE41 = cpu.X86.HasSSE41
   517  
   518  	arm64HasATOMICS = cpu.ARM64.HasATOMICS
   519  }
   520  
   521  // The bootstrap sequence is:
   522  //
   523  //	call osinit
   524  //	call schedinit
   525  //	make & queue new G
   526  //	call runtime·mstart
   527  //
   528  // The new G calls runtime·main.
   529  func schedinit() {
   530  	// raceinit must be the first call to race detector.
   531  	// In particular, it must be done before mallocinit below calls racemapshadow.
   532  	_g_ := getg()
   533  	if raceenabled {
   534  		_g_.racectx, raceprocctx0 = raceinit()
   535  	}
   536  
   537  	sched.maxmcount = 10000
   538  
   539  	tracebackinit()
   540  	moduledataverify()
   541  	stackinit()
   542  	mallocinit()
   543  	mcommoninit(_g_.m)
   544  	cpuinit()       // must run before alginit
   545  	alginit()       // maps must not be used before this call
   546  	modulesinit()   // provides activeModules
   547  	typelinksinit() // uses maps, activeModules
   548  	itabsinit()     // uses activeModules
   549  
   550  	msigsave(_g_.m)
   551  	initSigmask = _g_.m.sigmask
   552  
   553  	goargs()
   554  	goenvs()
   555  	parsedebugvars()
   556  	gcinit()
   557  
   558  	sched.lastpoll = uint64(nanotime())
   559  	procs := ncpu
   560  	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
   561  		procs = n
   562  	}
   563  	if procresize(procs) != nil {
   564  		throw("unknown runnable goroutine during bootstrap")
   565  	}
   566  
   567  	// For cgocheck > 1, we turn on the write barrier at all times
   568  	// and check all pointer writes. We can't do this until after
   569  	// procresize because the write barrier needs a P.
   570  	if debug.cgocheck > 1 {
   571  		writeBarrier.cgo = true
   572  		writeBarrier.enabled = true
   573  		for _, p := range allp {
   574  			p.wbBuf.reset()
   575  		}
   576  	}
   577  
   578  	if buildVersion == "" {
   579  		// Condition should never trigger. This code just serves
   580  		// to ensure runtime·buildVersion is kept in the resulting binary.
   581  		buildVersion = "unknown"
   582  	}
   583  	if len(modinfo) == 1 {
   584  		// Condition should never trigger. This code just serves
   585  		// to ensure runtime·modinfo is kept in the resulting binary.
   586  		modinfo = ""
   587  	}
   588  }
   589  
   590  func dumpgstatus(gp *g) {
   591  	_g_ := getg()
   592  	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   593  	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
   594  }
   595  
   596  func checkmcount() {
   597  	// sched lock is held
   598  	if mcount() > sched.maxmcount {
   599  		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
   600  		throw("thread exhaustion")
   601  	}
   602  }
   603  
   604  func mcommoninit(mp *m) {
   605  	_g_ := getg()
   606  
   607  	// g0 stack won't make sense for user (and is not necessary unwindable).
   608  	if _g_ != _g_.m.g0 {
   609  		callers(1, mp.createstack[:])
   610  	}
   611  
   612  	lock(&sched.lock)
   613  	if sched.mnext+1 < sched.mnext {
   614  		throw("runtime: thread ID overflow")
   615  	}
   616  	mp.id = sched.mnext
   617  	sched.mnext++
   618  	checkmcount()
   619  
   620  	mp.fastrand[0] = 1597334677 * uint32(mp.id)
   621  	mp.fastrand[1] = uint32(cputicks())
   622  	if mp.fastrand[0]|mp.fastrand[1] == 0 {
   623  		mp.fastrand[1] = 1
   624  	}
   625  
   626  	mpreinit(mp)
   627  	if mp.gsignal != nil {
   628  		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
   629  	}
   630  
   631  	// Add to allm so garbage collector doesn't free g->m
   632  	// when it is just in a register or thread-local storage.
   633  	mp.alllink = allm
   634  
   635  	// NumCgoCall() iterates over allm w/o schedlock,
   636  	// so we need to publish it safely.
   637  	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
   638  	unlock(&sched.lock)
   639  
   640  	// Allocate memory to hold a cgo traceback if the cgo call crashes.
   641  	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
   642  		mp.cgoCallers = new(cgoCallers)
   643  	}
   644  }
   645  
   646  // Mark gp ready to run.
   647  func ready(gp *g, traceskip int, next bool) {
   648  	if trace.enabled {
   649  		traceGoUnpark(gp, traceskip)
   650  	}
   651  
   652  	status := readgstatus(gp)
   653  
   654  	// Mark runnable.
   655  	_g_ := getg()
   656  	mp := acquirem() // disable preemption because it can be holding p in a local var
   657  	if status&^_Gscan != _Gwaiting {
   658  		dumpgstatus(gp)
   659  		throw("bad g->status in ready")
   660  	}
   661  
   662  	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
   663  	casgstatus(gp, _Gwaiting, _Grunnable)
   664  	runqput(_g_.m.p.ptr(), gp, next)
   665  	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
   666  		wakep()
   667  	}
   668  	releasem(mp)
   669  }
   670  
   671  // freezeStopWait is a large value that freezetheworld sets
   672  // sched.stopwait to in order to request that all Gs permanently stop.
   673  const freezeStopWait = 0x7fffffff
   674  
   675  // freezing is set to non-zero if the runtime is trying to freeze the
   676  // world.
   677  var freezing uint32
   678  
   679  // Similar to stopTheWorld but best-effort and can be called several times.
   680  // There is no reverse operation, used during crashing.
   681  // This function must not lock any mutexes.
   682  func freezetheworld() {
   683  	atomic.Store(&freezing, 1)
   684  	// stopwait and preemption requests can be lost
   685  	// due to races with concurrently executing threads,
   686  	// so try several times
   687  	for i := 0; i < 5; i++ {
   688  		// this should tell the scheduler to not start any new goroutines
   689  		sched.stopwait = freezeStopWait
   690  		atomic.Store(&sched.gcwaiting, 1)
   691  		// this should stop running goroutines
   692  		if !preemptall() {
   693  			break // no running goroutines
   694  		}
   695  		usleep(1000)
   696  	}
   697  	// to be sure
   698  	usleep(1000)
   699  	preemptall()
   700  	usleep(1000)
   701  }
   702  
   703  // All reads and writes of g's status go through readgstatus, casgstatus
   704  // castogscanstatus, casfrom_Gscanstatus.
   705  //go:nosplit
   706  func readgstatus(gp *g) uint32 {
   707  	return atomic.Load(&gp.atomicstatus)
   708  }
   709  
   710  // Ownership of gcscanvalid:
   711  //
   712  // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
   713  // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
   714  //
   715  // Otherwise, a second goroutine can lock the scan state by setting _Gscan
   716  // in the status bit and then modify gcscanvalid, and then unlock the scan state.
   717  //
   718  // Note that the first condition implies an exception to the second:
   719  // if a second goroutine changes gp's status to _Grunning|_Gscan,
   720  // that second goroutine still does not have the right to modify gcscanvalid.
   721  
   722  // The Gscanstatuses are acting like locks and this releases them.
   723  // If it proves to be a performance hit we should be able to make these
   724  // simple atomic stores but for now we are going to throw if
   725  // we see an inconsistent state.
   726  func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
   727  	success := false
   728  
   729  	// Check that transition is valid.
   730  	switch oldval {
   731  	default:
   732  		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   733  		dumpgstatus(gp)
   734  		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
   735  	case _Gscanrunnable,
   736  		_Gscanwaiting,
   737  		_Gscanrunning,
   738  		_Gscansyscall:
   739  		if newval == oldval&^_Gscan {
   740  			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
   741  		}
   742  	}
   743  	if !success {
   744  		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   745  		dumpgstatus(gp)
   746  		throw("casfrom_Gscanstatus: gp->status is not in scan state")
   747  	}
   748  }
   749  
   750  // This will return false if the gp is not in the expected status and the cas fails.
   751  // This acts like a lock acquire while the casfromgstatus acts like a lock release.
   752  func castogscanstatus(gp *g, oldval, newval uint32) bool {
   753  	switch oldval {
   754  	case _Grunnable,
   755  		_Grunning,
   756  		_Gwaiting,
   757  		_Gsyscall:
   758  		if newval == oldval|_Gscan {
   759  			return atomic.Cas(&gp.atomicstatus, oldval, newval)
   760  		}
   761  	}
   762  	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
   763  	throw("castogscanstatus")
   764  	panic("not reached")
   765  }
   766  
   767  // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
   768  // and casfrom_Gscanstatus instead.
   769  // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
   770  // put it in the Gscan state is finished.
   771  //go:nosplit
   772  func casgstatus(gp *g, oldval, newval uint32) {
   773  	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
   774  		systemstack(func() {
   775  			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
   776  			throw("casgstatus: bad incoming values")
   777  		})
   778  	}
   779  
   780  	if oldval == _Grunning && gp.gcscanvalid {
   781  		// If oldvall == _Grunning, then the actual status must be
   782  		// _Grunning or _Grunning|_Gscan; either way,
   783  		// we own gp.gcscanvalid, so it's safe to read.
   784  		// gp.gcscanvalid must not be true when we are running.
   785  		systemstack(func() {
   786  			print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
   787  			throw("casgstatus")
   788  		})
   789  	}
   790  
   791  	// See https://golang.org/cl/21503 for justification of the yield delay.
   792  	const yieldDelay = 5 * 1000
   793  	var nextYield int64
   794  
   795  	// loop if gp->atomicstatus is in a scan state giving
   796  	// GC time to finish and change the state to oldval.
   797  	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
   798  		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
   799  			throw("casgstatus: waiting for Gwaiting but is Grunnable")
   800  		}
   801  		// Help GC if needed.
   802  		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
   803  		// 	gp.preemptscan = false
   804  		// 	systemstack(func() {
   805  		// 		gcphasework(gp)
   806  		// 	})
   807  		// }
   808  		// But meanwhile just yield.
   809  		if i == 0 {
   810  			nextYield = nanotime() + yieldDelay
   811  		}
   812  		if nanotime() < nextYield {
   813  			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
   814  				procyield(1)
   815  			}
   816  		} else {
   817  			osyield()
   818  			nextYield = nanotime() + yieldDelay/2
   819  		}
   820  	}
   821  	if newval == _Grunning {
   822  		gp.gcscanvalid = false
   823  	}
   824  }
   825  
   826  // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
   827  // Returns old status. Cannot call casgstatus directly, because we are racing with an
   828  // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
   829  // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
   830  // it would loop waiting for the status to go back to Gwaiting, which it never will.
   831  //go:nosplit
   832  func casgcopystack(gp *g) uint32 {
   833  	for {
   834  		oldstatus := readgstatus(gp) &^ _Gscan
   835  		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
   836  			throw("copystack: bad status, not Gwaiting or Grunnable")
   837  		}
   838  		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
   839  			return oldstatus
   840  		}
   841  	}
   842  }
   843  
   844  // scang blocks until gp's stack has been scanned.
   845  // It might be scanned by scang or it might be scanned by the goroutine itself.
   846  // Either way, the stack scan has completed when scang returns.
   847  func scang(gp *g, gcw *gcWork) {
   848  	// Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone.
   849  	// Nothing is racing with us now, but gcscandone might be set to true left over
   850  	// from an earlier round of stack scanning (we scan twice per GC).
   851  	// We use gcscandone to record whether the scan has been done during this round.
   852  
   853  	gp.gcscandone = false
   854  
   855  	// See https://golang.org/cl/21503 for justification of the yield delay.
   856  	const yieldDelay = 10 * 1000
   857  	var nextYield int64
   858  
   859  	// Endeavor to get gcscandone set to true,
   860  	// either by doing the stack scan ourselves or by coercing gp to scan itself.
   861  	// gp.gcscandone can transition from false to true when we're not looking
   862  	// (if we asked for preemption), so any time we lock the status using
   863  	// castogscanstatus we have to double-check that the scan is still not done.
   864  loop:
   865  	for i := 0; !gp.gcscandone; i++ {
   866  		switch s := readgstatus(gp); s {
   867  		default:
   868  			dumpgstatus(gp)
   869  			throw("stopg: invalid status")
   870  
   871  		case _Gdead:
   872  			// No stack.
   873  			gp.gcscandone = true
   874  			break loop
   875  
   876  		case _Gcopystack:
   877  		// Stack being switched. Go around again.
   878  
   879  		case _Grunnable, _Gsyscall, _Gwaiting:
   880  			// Claim goroutine by setting scan bit.
   881  			// Racing with execution or readying of gp.
   882  			// The scan bit keeps them from running
   883  			// the goroutine until we're done.
   884  			if castogscanstatus(gp, s, s|_Gscan) {
   885  				if !gp.gcscandone {
   886  					scanstack(gp, gcw)
   887  					gp.gcscandone = true
   888  				}
   889  				restartg(gp)
   890  				break loop
   891  			}
   892  
   893  		case _Gscanwaiting:
   894  		// newstack is doing a scan for us right now. Wait.
   895  
   896  		case _Grunning:
   897  			// Goroutine running. Try to preempt execution so it can scan itself.
   898  			// The preemption handler (in newstack) does the actual scan.
   899  
   900  			// Optimization: if there is already a pending preemption request
   901  			// (from the previous loop iteration), don't bother with the atomics.
   902  			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
   903  				break
   904  			}
   905  
   906  			// Ask for preemption and self scan.
   907  			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
   908  				if !gp.gcscandone {
   909  					gp.preemptscan = true
   910  					gp.preempt = true
   911  					gp.stackguard0 = stackPreempt
   912  				}
   913  				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
   914  			}
   915  		}
   916  
   917  		if i == 0 {
   918  			nextYield = nanotime() + yieldDelay
   919  		}
   920  		if nanotime() < nextYield {
   921  			procyield(10)
   922  		} else {
   923  			osyield()
   924  			nextYield = nanotime() + yieldDelay/2
   925  		}
   926  	}
   927  
   928  	gp.preemptscan = false // cancel scan request if no longer needed
   929  }
   930  
   931  // The GC requests that this routine be moved from a scanmumble state to a mumble state.
   932  func restartg(gp *g) {
   933  	s := readgstatus(gp)
   934  	switch s {
   935  	default:
   936  		dumpgstatus(gp)
   937  		throw("restartg: unexpected status")
   938  
   939  	case _Gdead:
   940  	// ok
   941  
   942  	case _Gscanrunnable,
   943  		_Gscanwaiting,
   944  		_Gscansyscall:
   945  		casfrom_Gscanstatus(gp, s, s&^_Gscan)
   946  	}
   947  }
   948  
   949  // stopTheWorld stops all P's from executing goroutines, interrupting
   950  // all goroutines at GC safe points and records reason as the reason
   951  // for the stop. On return, only the current goroutine's P is running.
   952  // stopTheWorld must not be called from a system stack and the caller
   953  // must not hold worldsema. The caller must call startTheWorld when
   954  // other P's should resume execution.
   955  //
   956  // stopTheWorld is safe for multiple goroutines to call at the
   957  // same time. Each will execute its own stop, and the stops will
   958  // be serialized.
   959  //
   960  // This is also used by routines that do stack dumps. If the system is
   961  // in panic or being exited, this may not reliably stop all
   962  // goroutines.
   963  func stopTheWorld(reason string) {
   964  	semacquire(&worldsema)
   965  	getg().m.preemptoff = reason
   966  	systemstack(stopTheWorldWithSema)
   967  }
   968  
   969  // startTheWorld undoes the effects of stopTheWorld.
   970  func startTheWorld() {
   971  	systemstack(func() { startTheWorldWithSema(false) })
   972  	// worldsema must be held over startTheWorldWithSema to ensure
   973  	// gomaxprocs cannot change while worldsema is held.
   974  	semrelease(&worldsema)
   975  	getg().m.preemptoff = ""
   976  }
   977  
   978  // Holding worldsema grants an M the right to try to stop the world
   979  // and prevents gomaxprocs from changing concurrently.
   980  var worldsema uint32 = 1
   981  
   982  // stopTheWorldWithSema is the core implementation of stopTheWorld.
   983  // The caller is responsible for acquiring worldsema and disabling
   984  // preemption first and then should stopTheWorldWithSema on the system
   985  // stack:
   986  //
   987  //	semacquire(&worldsema, 0)
   988  //	m.preemptoff = "reason"
   989  //	systemstack(stopTheWorldWithSema)
   990  //
   991  // When finished, the caller must either call startTheWorld or undo
   992  // these three operations separately:
   993  //
   994  //	m.preemptoff = ""
   995  //	systemstack(startTheWorldWithSema)
   996  //	semrelease(&worldsema)
   997  //
   998  // It is allowed to acquire worldsema once and then execute multiple
   999  // startTheWorldWithSema/stopTheWorldWithSema pairs.
  1000  // Other P's are able to execute between successive calls to
  1001  // startTheWorldWithSema and stopTheWorldWithSema.
  1002  // Holding worldsema causes any other goroutines invoking
  1003  // stopTheWorld to block.
  1004  func stopTheWorldWithSema() {
  1005  	_g_ := getg()
  1006  
  1007  	// If we hold a lock, then we won't be able to stop another M
  1008  	// that is blocked trying to acquire the lock.
  1009  	if _g_.m.locks > 0 {
  1010  		throw("stopTheWorld: holding locks")
  1011  	}
  1012  
  1013  	lock(&sched.lock)
  1014  	sched.stopwait = gomaxprocs
  1015  	atomic.Store(&sched.gcwaiting, 1)
  1016  	preemptall()
  1017  	// stop current P
  1018  	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
  1019  	sched.stopwait--
  1020  	// try to retake all P's in Psyscall status
  1021  	for _, p := range allp {
  1022  		s := p.status
  1023  		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
  1024  			if trace.enabled {
  1025  				traceGoSysBlock(p)
  1026  				traceProcStop(p)
  1027  			}
  1028  			p.syscalltick++
  1029  			sched.stopwait--
  1030  		}
  1031  	}
  1032  	// stop idle P's
  1033  	for {
  1034  		p := pidleget()
  1035  		if p == nil {
  1036  			break
  1037  		}
  1038  		p.status = _Pgcstop
  1039  		sched.stopwait--
  1040  	}
  1041  	wait := sched.stopwait > 0
  1042  	unlock(&sched.lock)
  1043  
  1044  	// wait for remaining P's to stop voluntarily
  1045  	if wait {
  1046  		for {
  1047  			// wait for 100us, then try to re-preempt in case of any races
  1048  			if notetsleep(&sched.stopnote, 100*1000) {
  1049  				noteclear(&sched.stopnote)
  1050  				break
  1051  			}
  1052  			preemptall()
  1053  		}
  1054  	}
  1055  
  1056  	// sanity checks
  1057  	bad := ""
  1058  	if sched.stopwait != 0 {
  1059  		bad = "stopTheWorld: not stopped (stopwait != 0)"
  1060  	} else {
  1061  		for _, p := range allp {
  1062  			if p.status != _Pgcstop {
  1063  				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
  1064  			}
  1065  		}
  1066  	}
  1067  	if atomic.Load(&freezing) != 0 {
  1068  		// Some other thread is panicking. This can cause the
  1069  		// sanity checks above to fail if the panic happens in
  1070  		// the signal handler on a stopped thread. Either way,
  1071  		// we should halt this thread.
  1072  		lock(&deadlock)
  1073  		lock(&deadlock)
  1074  	}
  1075  	if bad != "" {
  1076  		throw(bad)
  1077  	}
  1078  }
  1079  
  1080  func startTheWorldWithSema(emitTraceEvent bool) int64 {
  1081  	mp := acquirem() // disable preemption because it can be holding p in a local var
  1082  	if netpollinited() {
  1083  		list := netpoll(false) // non-blocking
  1084  		injectglist(&list)
  1085  	}
  1086  	lock(&sched.lock)
  1087  
  1088  	procs := gomaxprocs
  1089  	if newprocs != 0 {
  1090  		procs = newprocs
  1091  		newprocs = 0
  1092  	}
  1093  	p1 := procresize(procs)
  1094  	sched.gcwaiting = 0
  1095  	if sched.sysmonwait != 0 {
  1096  		sched.sysmonwait = 0
  1097  		notewakeup(&sched.sysmonnote)
  1098  	}
  1099  	unlock(&sched.lock)
  1100  
  1101  	for p1 != nil {
  1102  		p := p1
  1103  		p1 = p1.link.ptr()
  1104  		if p.m != 0 {
  1105  			mp := p.m.ptr()
  1106  			p.m = 0
  1107  			if mp.nextp != 0 {
  1108  				throw("startTheWorld: inconsistent mp->nextp")
  1109  			}
  1110  			mp.nextp.set(p)
  1111  			notewakeup(&mp.park)
  1112  		} else {
  1113  			// Start M to run P.  Do not start another M below.
  1114  			newm(nil, p)
  1115  		}
  1116  	}
  1117  
  1118  	// Capture start-the-world time before doing clean-up tasks.
  1119  	startTime := nanotime()
  1120  	if emitTraceEvent {
  1121  		traceGCSTWDone()
  1122  	}
  1123  
  1124  	// Wakeup an additional proc in case we have excessive runnable goroutines
  1125  	// in local queues or in the global queue. If we don't, the proc will park itself.
  1126  	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
  1127  	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
  1128  		wakep()
  1129  	}
  1130  
  1131  	releasem(mp)
  1132  
  1133  	return startTime
  1134  }
  1135  
  1136  // mstart is the entry-point for new Ms.
  1137  //
  1138  // This must not split the stack because we may not even have stack
  1139  // bounds set up yet.
  1140  //
  1141  // May run during STW (because it doesn't have a P yet), so write
  1142  // barriers are not allowed.
  1143  //
  1144  //go:nosplit
  1145  //go:nowritebarrierrec
  1146  func mstart() {
  1147  	_g_ := getg()
  1148  
  1149  	osStack := _g_.stack.lo == 0
  1150  	if osStack {
  1151  		// Initialize stack bounds from system stack.
  1152  		// Cgo may have left stack size in stack.hi.
  1153  		// minit may update the stack bounds.
  1154  		size := _g_.stack.hi
  1155  		if size == 0 {
  1156  			size = 8192 * sys.StackGuardMultiplier
  1157  		}
  1158  		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
  1159  		_g_.stack.lo = _g_.stack.hi - size + 1024
  1160  	}
  1161  	// Initialize stack guard so that we can start calling regular
  1162  	// Go code.
  1163  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1164  	// This is the g0, so we can also call go:systemstack
  1165  	// functions, which check stackguard1.
  1166  	_g_.stackguard1 = _g_.stackguard0
  1167  	mstart1()
  1168  
  1169  	// Exit this thread.
  1170  	if GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "aix" {
  1171  		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
  1172  		// the stack, but put it in _g_.stack before mstart,
  1173  		// so the logic above hasn't set osStack yet.
  1174  		osStack = true
  1175  	}
  1176  	mexit(osStack)
  1177  }
  1178  
  1179  func mstart1() {
  1180  	_g_ := getg()
  1181  
  1182  	if _g_ != _g_.m.g0 {
  1183  		throw("bad runtime·mstart")
  1184  	}
  1185  
  1186  	// Record the caller for use as the top of stack in mcall and
  1187  	// for terminating the thread.
  1188  	// We're never coming back to mstart1 after we call schedule,
  1189  	// so other calls can reuse the current frame.
  1190  	save(getcallerpc(), getcallersp())
  1191  	asminit()
  1192  	minit()
  1193  
  1194  	// Install signal handlers; after minit so that minit can
  1195  	// prepare the thread to be able to handle the signals.
  1196  	if _g_.m == &m0 {
  1197  		mstartm0()
  1198  	}
  1199  
  1200  	if fn := _g_.m.mstartfn; fn != nil {
  1201  		fn()
  1202  	}
  1203  
  1204  	if _g_.m != &m0 {
  1205  		acquirep(_g_.m.nextp.ptr())
  1206  		_g_.m.nextp = 0
  1207  	}
  1208  	schedule()
  1209  }
  1210  
  1211  // mstartm0 implements part of mstart1 that only runs on the m0.
  1212  //
  1213  // Write barriers are allowed here because we know the GC can't be
  1214  // running yet, so they'll be no-ops.
  1215  //
  1216  //go:yeswritebarrierrec
  1217  func mstartm0() {
  1218  	// Create an extra M for callbacks on threads not created by Go.
  1219  	// An extra M is also needed on Windows for callbacks created by
  1220  	// syscall.NewCallback. See issue #6751 for details.
  1221  	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
  1222  		cgoHasExtraM = true
  1223  		newextram()
  1224  	}
  1225  	initsig(false)
  1226  }
  1227  
  1228  // mexit tears down and exits the current thread.
  1229  //
  1230  // Don't call this directly to exit the thread, since it must run at
  1231  // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
  1232  // unwind the stack to the point that exits the thread.
  1233  //
  1234  // It is entered with m.p != nil, so write barriers are allowed. It
  1235  // will release the P before exiting.
  1236  //
  1237  //go:yeswritebarrierrec
  1238  func mexit(osStack bool) {
  1239  	g := getg()
  1240  	m := g.m
  1241  
  1242  	if m == &m0 {
  1243  		// This is the main thread. Just wedge it.
  1244  		//
  1245  		// On Linux, exiting the main thread puts the process
  1246  		// into a non-waitable zombie state. On Plan 9,
  1247  		// exiting the main thread unblocks wait even though
  1248  		// other threads are still running. On Solaris we can
  1249  		// neither exitThread nor return from mstart. Other
  1250  		// bad things probably happen on other platforms.
  1251  		//
  1252  		// We could try to clean up this M more before wedging
  1253  		// it, but that complicates signal handling.
  1254  		handoffp(releasep())
  1255  		lock(&sched.lock)
  1256  		sched.nmfreed++
  1257  		checkdead()
  1258  		unlock(&sched.lock)
  1259  		notesleep(&m.park)
  1260  		throw("locked m0 woke up")
  1261  	}
  1262  
  1263  	sigblock()
  1264  	unminit()
  1265  
  1266  	// Free the gsignal stack.
  1267  	if m.gsignal != nil {
  1268  		stackfree(m.gsignal.stack)
  1269  	}
  1270  
  1271  	// Remove m from allm.
  1272  	lock(&sched.lock)
  1273  	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
  1274  		if *pprev == m {
  1275  			*pprev = m.alllink
  1276  			goto found
  1277  		}
  1278  	}
  1279  	throw("m not found in allm")
  1280  found:
  1281  	if !osStack {
  1282  		// Delay reaping m until it's done with the stack.
  1283  		//
  1284  		// If this is using an OS stack, the OS will free it
  1285  		// so there's no need for reaping.
  1286  		atomic.Store(&m.freeWait, 1)
  1287  		// Put m on the free list, though it will not be reaped until
  1288  		// freeWait is 0. Note that the free list must not be linked
  1289  		// through alllink because some functions walk allm without
  1290  		// locking, so may be using alllink.
  1291  		m.freelink = sched.freem
  1292  		sched.freem = m
  1293  	}
  1294  	unlock(&sched.lock)
  1295  
  1296  	// Release the P.
  1297  	handoffp(releasep())
  1298  	// After this point we must not have write barriers.
  1299  
  1300  	// Invoke the deadlock detector. This must happen after
  1301  	// handoffp because it may have started a new M to take our
  1302  	// P's work.
  1303  	lock(&sched.lock)
  1304  	sched.nmfreed++
  1305  	checkdead()
  1306  	unlock(&sched.lock)
  1307  
  1308  	if osStack {
  1309  		// Return from mstart and let the system thread
  1310  		// library free the g0 stack and terminate the thread.
  1311  		return
  1312  	}
  1313  
  1314  	// mstart is the thread's entry point, so there's nothing to
  1315  	// return to. Exit the thread directly. exitThread will clear
  1316  	// m.freeWait when it's done with the stack and the m can be
  1317  	// reaped.
  1318  	exitThread(&m.freeWait)
  1319  }
  1320  
  1321  // forEachP calls fn(p) for every P p when p reaches a GC safe point.
  1322  // If a P is currently executing code, this will bring the P to a GC
  1323  // safe point and execute fn on that P. If the P is not executing code
  1324  // (it is idle or in a syscall), this will call fn(p) directly while
  1325  // preventing the P from exiting its state. This does not ensure that
  1326  // fn will run on every CPU executing Go code, but it acts as a global
  1327  // memory barrier. GC uses this as a "ragged barrier."
  1328  //
  1329  // The caller must hold worldsema.
  1330  //
  1331  //go:systemstack
  1332  func forEachP(fn func(*p)) {
  1333  	mp := acquirem()
  1334  	_p_ := getg().m.p.ptr()
  1335  
  1336  	lock(&sched.lock)
  1337  	if sched.safePointWait != 0 {
  1338  		throw("forEachP: sched.safePointWait != 0")
  1339  	}
  1340  	sched.safePointWait = gomaxprocs - 1
  1341  	sched.safePointFn = fn
  1342  
  1343  	// Ask all Ps to run the safe point function.
  1344  	for _, p := range allp {
  1345  		if p != _p_ {
  1346  			atomic.Store(&p.runSafePointFn, 1)
  1347  		}
  1348  	}
  1349  	preemptall()
  1350  
  1351  	// Any P entering _Pidle or _Psyscall from now on will observe
  1352  	// p.runSafePointFn == 1 and will call runSafePointFn when
  1353  	// changing its status to _Pidle/_Psyscall.
  1354  
  1355  	// Run safe point function for all idle Ps. sched.pidle will
  1356  	// not change because we hold sched.lock.
  1357  	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
  1358  		if atomic.Cas(&p.runSafePointFn, 1, 0) {
  1359  			fn(p)
  1360  			sched.safePointWait--
  1361  		}
  1362  	}
  1363  
  1364  	wait := sched.safePointWait > 0
  1365  	unlock(&sched.lock)
  1366  
  1367  	// Run fn for the current P.
  1368  	fn(_p_)
  1369  
  1370  	// Force Ps currently in _Psyscall into _Pidle and hand them
  1371  	// off to induce safe point function execution.
  1372  	for _, p := range allp {
  1373  		s := p.status
  1374  		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
  1375  			if trace.enabled {
  1376  				traceGoSysBlock(p)
  1377  				traceProcStop(p)
  1378  			}
  1379  			p.syscalltick++
  1380  			handoffp(p)
  1381  		}
  1382  	}
  1383  
  1384  	// Wait for remaining Ps to run fn.
  1385  	if wait {
  1386  		for {
  1387  			// Wait for 100us, then try to re-preempt in
  1388  			// case of any races.
  1389  			//
  1390  			// Requires system stack.
  1391  			if notetsleep(&sched.safePointNote, 100*1000) {
  1392  				noteclear(&sched.safePointNote)
  1393  				break
  1394  			}
  1395  			preemptall()
  1396  		}
  1397  	}
  1398  	if sched.safePointWait != 0 {
  1399  		throw("forEachP: not done")
  1400  	}
  1401  	for _, p := range allp {
  1402  		if p.runSafePointFn != 0 {
  1403  			throw("forEachP: P did not run fn")
  1404  		}
  1405  	}
  1406  
  1407  	lock(&sched.lock)
  1408  	sched.safePointFn = nil
  1409  	unlock(&sched.lock)
  1410  	releasem(mp)
  1411  }
  1412  
  1413  // runSafePointFn runs the safe point function, if any, for this P.
  1414  // This should be called like
  1415  //
  1416  //     if getg().m.p.runSafePointFn != 0 {
  1417  //         runSafePointFn()
  1418  //     }
  1419  //
  1420  // runSafePointFn must be checked on any transition in to _Pidle or
  1421  // _Psyscall to avoid a race where forEachP sees that the P is running
  1422  // just before the P goes into _Pidle/_Psyscall and neither forEachP
  1423  // nor the P run the safe-point function.
  1424  func runSafePointFn() {
  1425  	p := getg().m.p.ptr()
  1426  	// Resolve the race between forEachP running the safe-point
  1427  	// function on this P's behalf and this P running the
  1428  	// safe-point function directly.
  1429  	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
  1430  		return
  1431  	}
  1432  	sched.safePointFn(p)
  1433  	lock(&sched.lock)
  1434  	sched.safePointWait--
  1435  	if sched.safePointWait == 0 {
  1436  		notewakeup(&sched.safePointNote)
  1437  	}
  1438  	unlock(&sched.lock)
  1439  }
  1440  
  1441  // When running with cgo, we call _cgo_thread_start
  1442  // to start threads for us so that we can play nicely with
  1443  // foreign code.
  1444  var cgoThreadStart unsafe.Pointer
  1445  
  1446  type cgothreadstart struct {
  1447  	g   guintptr
  1448  	tls *uint64
  1449  	fn  unsafe.Pointer
  1450  }
  1451  
  1452  // Allocate a new m unassociated with any thread.
  1453  // Can use p for allocation context if needed.
  1454  // fn is recorded as the new m's m.mstartfn.
  1455  //
  1456  // This function is allowed to have write barriers even if the caller
  1457  // isn't because it borrows _p_.
  1458  //
  1459  //go:yeswritebarrierrec
  1460  func allocm(_p_ *p, fn func()) *m {
  1461  	_g_ := getg()
  1462  	acquirem() // disable GC because it can be called from sysmon
  1463  	if _g_.m.p == 0 {
  1464  		acquirep(_p_) // temporarily borrow p for mallocs in this function
  1465  	}
  1466  
  1467  	// Release the free M list. We need to do this somewhere and
  1468  	// this may free up a stack we can use.
  1469  	if sched.freem != nil {
  1470  		lock(&sched.lock)
  1471  		var newList *m
  1472  		for freem := sched.freem; freem != nil; {
  1473  			if freem.freeWait != 0 {
  1474  				next := freem.freelink
  1475  				freem.freelink = newList
  1476  				newList = freem
  1477  				freem = next
  1478  				continue
  1479  			}
  1480  			stackfree(freem.g0.stack)
  1481  			freem = freem.freelink
  1482  		}
  1483  		sched.freem = newList
  1484  		unlock(&sched.lock)
  1485  	}
  1486  
  1487  	mp := new(m)
  1488  	mp.mstartfn = fn
  1489  	mcommoninit(mp)
  1490  
  1491  	// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
  1492  	// Windows and Plan 9 will layout sched stack on OS stack.
  1493  	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" {
  1494  		mp.g0 = malg(-1)
  1495  	} else {
  1496  		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
  1497  	}
  1498  	mp.g0.m = mp
  1499  
  1500  	if _p_ == _g_.m.p.ptr() {
  1501  		releasep()
  1502  	}
  1503  	releasem(_g_.m)
  1504  
  1505  	return mp
  1506  }
  1507  
  1508  // needm is called when a cgo callback happens on a
  1509  // thread without an m (a thread not created by Go).
  1510  // In this case, needm is expected to find an m to use
  1511  // and return with m, g initialized correctly.
  1512  // Since m and g are not set now (likely nil, but see below)
  1513  // needm is limited in what routines it can call. In particular
  1514  // it can only call nosplit functions (textflag 7) and cannot
  1515  // do any scheduling that requires an m.
  1516  //
  1517  // In order to avoid needing heavy lifting here, we adopt
  1518  // the following strategy: there is a stack of available m's
  1519  // that can be stolen. Using compare-and-swap
  1520  // to pop from the stack has ABA races, so we simulate
  1521  // a lock by doing an exchange (via Casuintptr) to steal the stack
  1522  // head and replace the top pointer with MLOCKED (1).
  1523  // This serves as a simple spin lock that we can use even
  1524  // without an m. The thread that locks the stack in this way
  1525  // unlocks the stack by storing a valid stack head pointer.
  1526  //
  1527  // In order to make sure that there is always an m structure
  1528  // available to be stolen, we maintain the invariant that there
  1529  // is always one more than needed. At the beginning of the
  1530  // program (if cgo is in use) the list is seeded with a single m.
  1531  // If needm finds that it has taken the last m off the list, its job
  1532  // is - once it has installed its own m so that it can do things like
  1533  // allocate memory - to create a spare m and put it on the list.
  1534  //
  1535  // Each of these extra m's also has a g0 and a curg that are
  1536  // pressed into service as the scheduling stack and current
  1537  // goroutine for the duration of the cgo callback.
  1538  //
  1539  // When the callback is done with the m, it calls dropm to
  1540  // put the m back on the list.
  1541  //go:nosplit
  1542  func needm(x byte) {
  1543  	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
  1544  		// Can happen if C/C++ code calls Go from a global ctor.
  1545  		// Can also happen on Windows if a global ctor uses a
  1546  		// callback created by syscall.NewCallback. See issue #6751
  1547  		// for details.
  1548  		//
  1549  		// Can not throw, because scheduler is not initialized yet.
  1550  		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
  1551  		exit(1)
  1552  	}
  1553  
  1554  	// Lock extra list, take head, unlock popped list.
  1555  	// nilokay=false is safe here because of the invariant above,
  1556  	// that the extra list always contains or will soon contain
  1557  	// at least one m.
  1558  	mp := lockextra(false)
  1559  
  1560  	// Set needextram when we've just emptied the list,
  1561  	// so that the eventual call into cgocallbackg will
  1562  	// allocate a new m for the extra list. We delay the
  1563  	// allocation until then so that it can be done
  1564  	// after exitsyscall makes sure it is okay to be
  1565  	// running at all (that is, there's no garbage collection
  1566  	// running right now).
  1567  	mp.needextram = mp.schedlink == 0
  1568  	extraMCount--
  1569  	unlockextra(mp.schedlink.ptr())
  1570  
  1571  	// Save and block signals before installing g.
  1572  	// Once g is installed, any incoming signals will try to execute,
  1573  	// but we won't have the sigaltstack settings and other data
  1574  	// set up appropriately until the end of minit, which will
  1575  	// unblock the signals. This is the same dance as when
  1576  	// starting a new m to run Go code via newosproc.
  1577  	msigsave(mp)
  1578  	sigblock()
  1579  
  1580  	// Install g (= m->g0) and set the stack bounds
  1581  	// to match the current stack. We don't actually know
  1582  	// how big the stack is, like we don't know how big any
  1583  	// scheduling stack is, but we assume there's at least 32 kB,
  1584  	// which is more than enough for us.
  1585  	setg(mp.g0)
  1586  	_g_ := getg()
  1587  	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
  1588  	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
  1589  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1590  
  1591  	// Initialize this thread to use the m.
  1592  	asminit()
  1593  	minit()
  1594  
  1595  	// mp.curg is now a real goroutine.
  1596  	casgstatus(mp.curg, _Gdead, _Gsyscall)
  1597  	atomic.Xadd(&sched.ngsys, -1)
  1598  }
  1599  
  1600  var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
  1601  
  1602  // newextram allocates m's and puts them on the extra list.
  1603  // It is called with a working local m, so that it can do things
  1604  // like call schedlock and allocate.
  1605  func newextram() {
  1606  	c := atomic.Xchg(&extraMWaiters, 0)
  1607  	if c > 0 {
  1608  		for i := uint32(0); i < c; i++ {
  1609  			oneNewExtraM()
  1610  		}
  1611  	} else {
  1612  		// Make sure there is at least one extra M.
  1613  		mp := lockextra(true)
  1614  		unlockextra(mp)
  1615  		if mp == nil {
  1616  			oneNewExtraM()
  1617  		}
  1618  	}
  1619  }
  1620  
  1621  // oneNewExtraM allocates an m and puts it on the extra list.
  1622  func oneNewExtraM() {
  1623  	// Create extra goroutine locked to extra m.
  1624  	// The goroutine is the context in which the cgo callback will run.
  1625  	// The sched.pc will never be returned to, but setting it to
  1626  	// goexit makes clear to the traceback routines where
  1627  	// the goroutine stack ends.
  1628  	mp := allocm(nil, nil)
  1629  	gp := malg(4096)
  1630  	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
  1631  	gp.sched.sp = gp.stack.hi
  1632  	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
  1633  	gp.sched.lr = 0
  1634  	gp.sched.g = guintptr(unsafe.Pointer(gp))
  1635  	gp.syscallpc = gp.sched.pc
  1636  	gp.syscallsp = gp.sched.sp
  1637  	gp.stktopsp = gp.sched.sp
  1638  	gp.gcscanvalid = true
  1639  	gp.gcscandone = true
  1640  	// malg returns status as _Gidle. Change to _Gdead before
  1641  	// adding to allg where GC can see it. We use _Gdead to hide
  1642  	// this from tracebacks and stack scans since it isn't a
  1643  	// "real" goroutine until needm grabs it.
  1644  	casgstatus(gp, _Gidle, _Gdead)
  1645  	gp.m = mp
  1646  	mp.curg = gp
  1647  	mp.lockedInt++
  1648  	mp.lockedg.set(gp)
  1649  	gp.lockedm.set(mp)
  1650  	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
  1651  	if raceenabled {
  1652  		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
  1653  	}
  1654  	// put on allg for garbage collector
  1655  	allgadd(gp)
  1656  
  1657  	// gp is now on the allg list, but we don't want it to be
  1658  	// counted by gcount. It would be more "proper" to increment
  1659  	// sched.ngfree, but that requires locking. Incrementing ngsys
  1660  	// has the same effect.
  1661  	atomic.Xadd(&sched.ngsys, +1)
  1662  
  1663  	// Add m to the extra list.
  1664  	mnext := lockextra(true)
  1665  	mp.schedlink.set(mnext)
  1666  	extraMCount++
  1667  	unlockextra(mp)
  1668  }
  1669  
  1670  // dropm is called when a cgo callback has called needm but is now
  1671  // done with the callback and returning back into the non-Go thread.
  1672  // It puts the current m back onto the extra list.
  1673  //
  1674  // The main expense here is the call to signalstack to release the
  1675  // m's signal stack, and then the call to needm on the next callback
  1676  // from this thread. It is tempting to try to save the m for next time,
  1677  // which would eliminate both these costs, but there might not be
  1678  // a next time: the current thread (which Go does not control) might exit.
  1679  // If we saved the m for that thread, there would be an m leak each time
  1680  // such a thread exited. Instead, we acquire and release an m on each
  1681  // call. These should typically not be scheduling operations, just a few
  1682  // atomics, so the cost should be small.
  1683  //
  1684  // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
  1685  // variable using pthread_key_create. Unlike the pthread keys we already use
  1686  // on OS X, this dummy key would never be read by Go code. It would exist
  1687  // only so that we could register at thread-exit-time destructor.
  1688  // That destructor would put the m back onto the extra list.
  1689  // This is purely a performance optimization. The current version,
  1690  // in which dropm happens on each cgo call, is still correct too.
  1691  // We may have to keep the current version on systems with cgo
  1692  // but without pthreads, like Windows.
  1693  func dropm() {
  1694  	// Clear m and g, and return m to the extra list.
  1695  	// After the call to setg we can only call nosplit functions
  1696  	// with no pointer manipulation.
  1697  	mp := getg().m
  1698  
  1699  	// Return mp.curg to dead state.
  1700  	casgstatus(mp.curg, _Gsyscall, _Gdead)
  1701  	atomic.Xadd(&sched.ngsys, +1)
  1702  
  1703  	// Block signals before unminit.
  1704  	// Unminit unregisters the signal handling stack (but needs g on some systems).
  1705  	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
  1706  	// It's important not to try to handle a signal between those two steps.
  1707  	sigmask := mp.sigmask
  1708  	sigblock()
  1709  	unminit()
  1710  
  1711  	mnext := lockextra(true)
  1712  	extraMCount++
  1713  	mp.schedlink.set(mnext)
  1714  
  1715  	setg(nil)
  1716  
  1717  	// Commit the release of mp.
  1718  	unlockextra(mp)
  1719  
  1720  	msigrestore(sigmask)
  1721  }
  1722  
  1723  // A helper function for EnsureDropM.
  1724  func getm() uintptr {
  1725  	return uintptr(unsafe.Pointer(getg().m))
  1726  }
  1727  
  1728  var extram uintptr
  1729  var extraMCount uint32 // Protected by lockextra
  1730  var extraMWaiters uint32
  1731  
  1732  // lockextra locks the extra list and returns the list head.
  1733  // The caller must unlock the list by storing a new list head
  1734  // to extram. If nilokay is true, then lockextra will
  1735  // return a nil list head if that's what it finds. If nilokay is false,
  1736  // lockextra will keep waiting until the list head is no longer nil.
  1737  //go:nosplit
  1738  func lockextra(nilokay bool) *m {
  1739  	const locked = 1
  1740  
  1741  	incr := false
  1742  	for {
  1743  		old := atomic.Loaduintptr(&extram)
  1744  		if old == locked {
  1745  			yield := osyield
  1746  			yield()
  1747  			continue
  1748  		}
  1749  		if old == 0 && !nilokay {
  1750  			if !incr {
  1751  				// Add 1 to the number of threads
  1752  				// waiting for an M.
  1753  				// This is cleared by newextram.
  1754  				atomic.Xadd(&extraMWaiters, 1)
  1755  				incr = true
  1756  			}
  1757  			usleep(1)
  1758  			continue
  1759  		}
  1760  		if atomic.Casuintptr(&extram, old, locked) {
  1761  			return (*m)(unsafe.Pointer(old))
  1762  		}
  1763  		yield := osyield
  1764  		yield()
  1765  		continue
  1766  	}
  1767  }
  1768  
  1769  //go:nosplit
  1770  func unlockextra(mp *m) {
  1771  	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
  1772  }
  1773  
  1774  // execLock serializes exec and clone to avoid bugs or unspecified behaviour
  1775  // around exec'ing while creating/destroying threads.  See issue #19546.
  1776  var execLock rwmutex
  1777  
  1778  // newmHandoff contains a list of m structures that need new OS threads.
  1779  // This is used by newm in situations where newm itself can't safely
  1780  // start an OS thread.
  1781  var newmHandoff struct {
  1782  	lock mutex
  1783  
  1784  	// newm points to a list of M structures that need new OS
  1785  	// threads. The list is linked through m.schedlink.
  1786  	newm muintptr
  1787  
  1788  	// waiting indicates that wake needs to be notified when an m
  1789  	// is put on the list.
  1790  	waiting bool
  1791  	wake    note
  1792  
  1793  	// haveTemplateThread indicates that the templateThread has
  1794  	// been started. This is not protected by lock. Use cas to set
  1795  	// to 1.
  1796  	haveTemplateThread uint32
  1797  }
  1798  
  1799  // Create a new m. It will start off with a call to fn, or else the scheduler.
  1800  // fn needs to be static and not a heap allocated closure.
  1801  // May run with m.p==nil, so write barriers are not allowed.
  1802  //go:nowritebarrierrec
  1803  func newm(fn func(), _p_ *p) {
  1804  	mp := allocm(_p_, fn)
  1805  	mp.nextp.set(_p_)
  1806  	mp.sigmask = initSigmask
  1807  	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
  1808  		// We're on a locked M or a thread that may have been
  1809  		// started by C. The kernel state of this thread may
  1810  		// be strange (the user may have locked it for that
  1811  		// purpose). We don't want to clone that into another
  1812  		// thread. Instead, ask a known-good thread to create
  1813  		// the thread for us.
  1814  		//
  1815  		// This is disabled on Plan 9. See golang.org/issue/22227.
  1816  		//
  1817  		// TODO: This may be unnecessary on Windows, which
  1818  		// doesn't model thread creation off fork.
  1819  		lock(&newmHandoff.lock)
  1820  		if newmHandoff.haveTemplateThread == 0 {
  1821  			throw("on a locked thread with no template thread")
  1822  		}
  1823  		mp.schedlink = newmHandoff.newm
  1824  		newmHandoff.newm.set(mp)
  1825  		if newmHandoff.waiting {
  1826  			newmHandoff.waiting = false
  1827  			notewakeup(&newmHandoff.wake)
  1828  		}
  1829  		unlock(&newmHandoff.lock)
  1830  		return
  1831  	}
  1832  	newm1(mp)
  1833  }
  1834  
  1835  func newm1(mp *m) {
  1836  	if iscgo {
  1837  		var ts cgothreadstart
  1838  		if _cgo_thread_start == nil {
  1839  			throw("_cgo_thread_start missing")
  1840  		}
  1841  		ts.g.set(mp.g0)
  1842  		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
  1843  		ts.fn = unsafe.Pointer(funcPC(mstart))
  1844  		if msanenabled {
  1845  			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
  1846  		}
  1847  		execLock.rlock() // Prevent process clone.
  1848  		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
  1849  		execLock.runlock()
  1850  		return
  1851  	}
  1852  	execLock.rlock() // Prevent process clone.
  1853  	newosproc(mp)
  1854  	execLock.runlock()
  1855  }
  1856  
  1857  // startTemplateThread starts the template thread if it is not already
  1858  // running.
  1859  //
  1860  // The calling thread must itself be in a known-good state.
  1861  func startTemplateThread() {
  1862  	if GOARCH == "wasm" { // no threads on wasm yet
  1863  		return
  1864  	}
  1865  	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
  1866  		return
  1867  	}
  1868  	newm(templateThread, nil)
  1869  }
  1870  
  1871  // templateThread is a thread in a known-good state that exists solely
  1872  // to start new threads in known-good states when the calling thread
  1873  // may not be in a good state.
  1874  //
  1875  // Many programs never need this, so templateThread is started lazily
  1876  // when we first enter a state that might lead to running on a thread
  1877  // in an unknown state.
  1878  //
  1879  // templateThread runs on an M without a P, so it must not have write
  1880  // barriers.
  1881  //
  1882  //go:nowritebarrierrec
  1883  func templateThread() {
  1884  	lock(&sched.lock)
  1885  	sched.nmsys++
  1886  	checkdead()
  1887  	unlock(&sched.lock)
  1888  
  1889  	for {
  1890  		lock(&newmHandoff.lock)
  1891  		for newmHandoff.newm != 0 {
  1892  			newm := newmHandoff.newm.ptr()
  1893  			newmHandoff.newm = 0
  1894  			unlock(&newmHandoff.lock)
  1895  			for newm != nil {
  1896  				next := newm.schedlink.ptr()
  1897  				newm.schedlink = 0
  1898  				newm1(newm)
  1899  				newm = next
  1900  			}
  1901  			lock(&newmHandoff.lock)
  1902  		}
  1903  		newmHandoff.waiting = true
  1904  		noteclear(&newmHandoff.wake)
  1905  		unlock(&newmHandoff.lock)
  1906  		notesleep(&newmHandoff.wake)
  1907  	}
  1908  }
  1909  
  1910  // Stops execution of the current m until new work is available.
  1911  // Returns with acquired P.
  1912  func stopm() {
  1913  	_g_ := getg()
  1914  
  1915  	if _g_.m.locks != 0 {
  1916  		throw("stopm holding locks")
  1917  	}
  1918  	if _g_.m.p != 0 {
  1919  		throw("stopm holding p")
  1920  	}
  1921  	if _g_.m.spinning {
  1922  		throw("stopm spinning")
  1923  	}
  1924  
  1925  	lock(&sched.lock)
  1926  	mput(_g_.m)
  1927  	unlock(&sched.lock)
  1928  	notesleep(&_g_.m.park)
  1929  	noteclear(&_g_.m.park)
  1930  	acquirep(_g_.m.nextp.ptr())
  1931  	_g_.m.nextp = 0
  1932  }
  1933  
  1934  func mspinning() {
  1935  	// startm's caller incremented nmspinning. Set the new M's spinning.
  1936  	getg().m.spinning = true
  1937  }
  1938  
  1939  // Schedules some M to run the p (creates an M if necessary).
  1940  // If p==nil, tries to get an idle P, if no idle P's does nothing.
  1941  // May run with m.p==nil, so write barriers are not allowed.
  1942  // If spinning is set, the caller has incremented nmspinning and startm will
  1943  // either decrement nmspinning or set m.spinning in the newly started M.
  1944  //go:nowritebarrierrec
  1945  func startm(_p_ *p, spinning bool) {
  1946  	lock(&sched.lock)
  1947  	if _p_ == nil {
  1948  		_p_ = pidleget()
  1949  		if _p_ == nil {
  1950  			unlock(&sched.lock)
  1951  			if spinning {
  1952  				// The caller incremented nmspinning, but there are no idle Ps,
  1953  				// so it's okay to just undo the increment and give up.
  1954  				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
  1955  					throw("startm: negative nmspinning")
  1956  				}
  1957  			}
  1958  			return
  1959  		}
  1960  	}
  1961  	mp := mget()
  1962  	unlock(&sched.lock)
  1963  	if mp == nil {
  1964  		var fn func()
  1965  		if spinning {
  1966  			// The caller incremented nmspinning, so set m.spinning in the new M.
  1967  			fn = mspinning
  1968  		}
  1969  		newm(fn, _p_)
  1970  		return
  1971  	}
  1972  	if mp.spinning {
  1973  		throw("startm: m is spinning")
  1974  	}
  1975  	if mp.nextp != 0 {
  1976  		throw("startm: m has p")
  1977  	}
  1978  	if spinning && !runqempty(_p_) {
  1979  		throw("startm: p has runnable gs")
  1980  	}
  1981  	// The caller incremented nmspinning, so set m.spinning in the new M.
  1982  	mp.spinning = spinning
  1983  	mp.nextp.set(_p_)
  1984  	notewakeup(&mp.park)
  1985  }
  1986  
  1987  // Hands off P from syscall or locked M.
  1988  // Always runs without a P, so write barriers are not allowed.
  1989  //go:nowritebarrierrec
  1990  func handoffp(_p_ *p) {
  1991  	// handoffp must start an M in any situation where
  1992  	// findrunnable would return a G to run on _p_.
  1993  
  1994  	// if it has local work, start it straight away
  1995  	if !runqempty(_p_) || sched.runqsize != 0 {
  1996  		startm(_p_, false)
  1997  		return
  1998  	}
  1999  	// if it has GC work, start it straight away
  2000  	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
  2001  		startm(_p_, false)
  2002  		return
  2003  	}
  2004  	// no local work, check that there are no spinning/idle M's,
  2005  	// otherwise our help is not required
  2006  	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
  2007  		startm(_p_, true)
  2008  		return
  2009  	}
  2010  	lock(&sched.lock)
  2011  	if sched.gcwaiting != 0 {
  2012  		_p_.status = _Pgcstop
  2013  		sched.stopwait--
  2014  		if sched.stopwait == 0 {
  2015  			notewakeup(&sched.stopnote)
  2016  		}
  2017  		unlock(&sched.lock)
  2018  		return
  2019  	}
  2020  	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
  2021  		sched.safePointFn(_p_)
  2022  		sched.safePointWait--
  2023  		if sched.safePointWait == 0 {
  2024  			notewakeup(&sched.safePointNote)
  2025  		}
  2026  	}
  2027  	if sched.runqsize != 0 {
  2028  		unlock(&sched.lock)
  2029  		startm(_p_, false)
  2030  		return
  2031  	}
  2032  	// If this is the last running P and nobody is polling network,
  2033  	// need to wakeup another M to poll network.
  2034  	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
  2035  		unlock(&sched.lock)
  2036  		startm(_p_, false)
  2037  		return
  2038  	}
  2039  	pidleput(_p_)
  2040  	unlock(&sched.lock)
  2041  }
  2042  
  2043  // Tries to add one more P to execute G's.
  2044  // Called when a G is made runnable (newproc, ready).
  2045  func wakep() {
  2046  	// be conservative about spinning threads
  2047  	if !atomic.Cas(&sched.nmspinning, 0, 1) {
  2048  		return
  2049  	}
  2050  	startm(nil, true)
  2051  }
  2052  
  2053  // Stops execution of the current m that is locked to a g until the g is runnable again.
  2054  // Returns with acquired P.
  2055  func stoplockedm() {
  2056  	_g_ := getg()
  2057  
  2058  	if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
  2059  		throw("stoplockedm: inconsistent locking")
  2060  	}
  2061  	if _g_.m.p != 0 {
  2062  		// Schedule another M to run this p.
  2063  		_p_ := releasep()
  2064  		handoffp(_p_)
  2065  	}
  2066  	incidlelocked(1)
  2067  	// Wait until another thread schedules lockedg again.
  2068  	notesleep(&_g_.m.park)
  2069  	noteclear(&_g_.m.park)
  2070  	status := readgstatus(_g_.m.lockedg.ptr())
  2071  	if status&^_Gscan != _Grunnable {
  2072  		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
  2073  		dumpgstatus(_g_)
  2074  		throw("stoplockedm: not runnable")
  2075  	}
  2076  	acquirep(_g_.m.nextp.ptr())
  2077  	_g_.m.nextp = 0
  2078  }
  2079  
  2080  // Schedules the locked m to run the locked gp.
  2081  // May run during STW, so write barriers are not allowed.
  2082  //go:nowritebarrierrec
  2083  func startlockedm(gp *g) {
  2084  	_g_ := getg()
  2085  
  2086  	mp := gp.lockedm.ptr()
  2087  	if mp == _g_.m {
  2088  		throw("startlockedm: locked to me")
  2089  	}
  2090  	if mp.nextp != 0 {
  2091  		throw("startlockedm: m has p")
  2092  	}
  2093  	// directly handoff current P to the locked m
  2094  	incidlelocked(-1)
  2095  	_p_ := releasep()
  2096  	mp.nextp.set(_p_)
  2097  	notewakeup(&mp.park)
  2098  	stopm()
  2099  }
  2100  
  2101  // Stops the current m for stopTheWorld.
  2102  // Returns when the world is restarted.
  2103  func gcstopm() {
  2104  	_g_ := getg()
  2105  
  2106  	if sched.gcwaiting == 0 {
  2107  		throw("gcstopm: not waiting for gc")
  2108  	}
  2109  	if _g_.m.spinning {
  2110  		_g_.m.spinning = false
  2111  		// OK to just drop nmspinning here,
  2112  		// startTheWorld will unpark threads as necessary.
  2113  		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
  2114  			throw("gcstopm: negative nmspinning")
  2115  		}
  2116  	}
  2117  	_p_ := releasep()
  2118  	lock(&sched.lock)
  2119  	_p_.status = _Pgcstop
  2120  	sched.stopwait--
  2121  	if sched.stopwait == 0 {
  2122  		notewakeup(&sched.stopnote)
  2123  	}
  2124  	unlock(&sched.lock)
  2125  	stopm()
  2126  }
  2127  
  2128  // Schedules gp to run on the current M.
  2129  // If inheritTime is true, gp inherits the remaining time in the
  2130  // current time slice. Otherwise, it starts a new time slice.
  2131  // Never returns.
  2132  //
  2133  // Write barriers are allowed because this is called immediately after
  2134  // acquiring a P in several places.
  2135  //
  2136  //go:yeswritebarrierrec
  2137  func execute(gp *g, inheritTime bool) {
  2138  	_g_ := getg()
  2139  
  2140  	casgstatus(gp, _Grunnable, _Grunning)
  2141  	gp.waitsince = 0
  2142  	gp.preempt = false
  2143  	gp.stackguard0 = gp.stack.lo + _StackGuard
  2144  	if !inheritTime {
  2145  		_g_.m.p.ptr().schedtick++
  2146  	}
  2147  	_g_.m.curg = gp
  2148  	gp.m = _g_.m
  2149  
  2150  	// Check whether the profiler needs to be turned on or off.
  2151  	hz := sched.profilehz
  2152  	if _g_.m.profilehz != hz {
  2153  		setThreadCPUProfiler(hz)
  2154  	}
  2155  
  2156  	if trace.enabled {
  2157  		// GoSysExit has to happen when we have a P, but before GoStart.
  2158  		// So we emit it here.
  2159  		if gp.syscallsp != 0 && gp.sysblocktraced {
  2160  			traceGoSysExit(gp.sysexitticks)
  2161  		}
  2162  		traceGoStart()
  2163  	}
  2164  
  2165  	gogo(&gp.sched)
  2166  }
  2167  
  2168  // Finds a runnable goroutine to execute.
  2169  // Tries to steal from other P's, get g from global queue, poll network.
  2170  func findrunnable() (gp *g, inheritTime bool) {
  2171  	_g_ := getg()
  2172  
  2173  	// The conditions here and in handoffp must agree: if
  2174  	// findrunnable would return a G to run, handoffp must start
  2175  	// an M.
  2176  
  2177  top:
  2178  	_p_ := _g_.m.p.ptr()
  2179  	if sched.gcwaiting != 0 {
  2180  		gcstopm()
  2181  		goto top
  2182  	}
  2183  	if _p_.runSafePointFn != 0 {
  2184  		runSafePointFn()
  2185  	}
  2186  	if fingwait && fingwake {
  2187  		if gp := wakefing(); gp != nil {
  2188  			ready(gp, 0, true)
  2189  		}
  2190  	}
  2191  	if *cgo_yield != nil {
  2192  		asmcgocall(*cgo_yield, nil)
  2193  	}
  2194  
  2195  	// local runq
  2196  	if gp, inheritTime := runqget(_p_); gp != nil {
  2197  		return gp, inheritTime
  2198  	}
  2199  
  2200  	// global runq
  2201  	if sched.runqsize != 0 {
  2202  		lock(&sched.lock)
  2203  		gp := globrunqget(_p_, 0)
  2204  		unlock(&sched.lock)
  2205  		if gp != nil {
  2206  			return gp, false
  2207  		}
  2208  	}
  2209  
  2210  	// Poll network.
  2211  	// This netpoll is only an optimization before we resort to stealing.
  2212  	// We can safely skip it if there are no waiters or a thread is blocked
  2213  	// in netpoll already. If there is any kind of logical race with that
  2214  	// blocked thread (e.g. it has already returned from netpoll, but does
  2215  	// not set lastpoll yet), this thread will do blocking netpoll below
  2216  	// anyway.
  2217  	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
  2218  		if list := netpoll(false); !list.empty() { // non-blocking
  2219  			gp := list.pop()
  2220  			injectglist(&list)
  2221  			casgstatus(gp, _Gwaiting, _Grunnable)
  2222  			if trace.enabled {
  2223  				traceGoUnpark(gp, 0)
  2224  			}
  2225  			return gp, false
  2226  		}
  2227  	}
  2228  
  2229  	// Steal work from other P's.
  2230  	procs := uint32(gomaxprocs)
  2231  	if atomic.Load(&sched.npidle) == procs-1 {
  2232  		// Either GOMAXPROCS=1 or everybody, except for us, is idle already.
  2233  		// New work can appear from returning syscall/cgocall, network or timers.
  2234  		// Neither of that submits to local run queues, so no point in stealing.
  2235  		goto stop
  2236  	}
  2237  	// If number of spinning M's >= number of busy P's, block.
  2238  	// This is necessary to prevent excessive CPU consumption
  2239  	// when GOMAXPROCS>>1 but the program parallelism is low.
  2240  	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
  2241  		goto stop
  2242  	}
  2243  	if !_g_.m.spinning {
  2244  		_g_.m.spinning = true
  2245  		atomic.Xadd(&sched.nmspinning, 1)
  2246  	}
  2247  	for i := 0; i < 4; i++ {
  2248  		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
  2249  			if sched.gcwaiting != 0 {
  2250  				goto top
  2251  			}
  2252  			stealRunNextG := i > 2 // first look for ready queues with more than 1 g
  2253  			if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
  2254  				return gp, false
  2255  			}
  2256  		}
  2257  	}
  2258  
  2259  stop:
  2260  
  2261  	// We have nothing to do. If we're in the GC mark phase, can
  2262  	// safely scan and blacken objects, and have work to do, run
  2263  	// idle-time marking rather than give up the P.
  2264  	if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
  2265  		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
  2266  		gp := _p_.gcBgMarkWorker.ptr()
  2267  		casgstatus(gp, _Gwaiting, _Grunnable)
  2268  		if trace.enabled {
  2269  			traceGoUnpark(gp, 0)
  2270  		}
  2271  		return gp, false
  2272  	}
  2273  
  2274  	// wasm only:
  2275  	// If a callback returned and no other goroutine is awake,
  2276  	// then pause execution until a callback was triggered.
  2277  	if beforeIdle() {
  2278  		// At least one goroutine got woken.
  2279  		goto top
  2280  	}
  2281  
  2282  	// Before we drop our P, make a snapshot of the allp slice,
  2283  	// which can change underfoot once we no longer block
  2284  	// safe-points. We don't need to snapshot the contents because
  2285  	// everything up to cap(allp) is immutable.
  2286  	allpSnapshot := allp
  2287  
  2288  	// return P and block
  2289  	lock(&sched.lock)
  2290  	if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
  2291  		unlock(&sched.lock)
  2292  		goto top
  2293  	}
  2294  	if sched.runqsize != 0 {
  2295  		gp := globrunqget(_p_, 0)
  2296  		unlock(&sched.lock)
  2297  		return gp, false
  2298  	}
  2299  	if releasep() != _p_ {
  2300  		throw("findrunnable: wrong p")
  2301  	}
  2302  	pidleput(_p_)
  2303  	unlock(&sched.lock)
  2304  
  2305  	// Delicate dance: thread transitions from spinning to non-spinning state,
  2306  	// potentially concurrently with submission of new goroutines. We must
  2307  	// drop nmspinning first and then check all per-P queues again (with
  2308  	// #StoreLoad memory barrier in between). If we do it the other way around,
  2309  	// another thread can submit a goroutine after we've checked all run queues
  2310  	// but before we drop nmspinning; as the result nobody will unpark a thread
  2311  	// to run the goroutine.
  2312  	// If we discover new work below, we need to restore m.spinning as a signal
  2313  	// for resetspinning to unpark a new worker thread (because there can be more
  2314  	// than one starving goroutine). However, if after discovering new work
  2315  	// we also observe no idle Ps, it is OK to just park the current thread:
  2316  	// the system is fully loaded so no spinning threads are required.
  2317  	// Also see "Worker thread parking/unparking" comment at the top of the file.
  2318  	wasSpinning := _g_.m.spinning
  2319  	if _g_.m.spinning {
  2320  		_g_.m.spinning = false
  2321  		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
  2322  			throw("findrunnable: negative nmspinning")
  2323  		}
  2324  	}
  2325  
  2326  	// check all runqueues once again
  2327  	for _, _p_ := range allpSnapshot {
  2328  		if !runqempty(_p_) {
  2329  			lock(&sched.lock)
  2330  			_p_ = pidleget()
  2331  			unlock(&sched.lock)
  2332  			if _p_ != nil {
  2333  				acquirep(_p_)
  2334  				if wasSpinning {
  2335  					_g_.m.spinning = true
  2336  					atomic.Xadd(&sched.nmspinning, 1)
  2337  				}
  2338  				goto top
  2339  			}
  2340  			break
  2341  		}
  2342  	}
  2343  
  2344  	// Check for idle-priority GC work again.
  2345  	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
  2346  		lock(&sched.lock)
  2347  		_p_ = pidleget()
  2348  		if _p_ != nil && _p_.gcBgMarkWorker == 0 {
  2349  			pidleput(_p_)
  2350  			_p_ = nil
  2351  		}
  2352  		unlock(&sched.lock)
  2353  		if _p_ != nil {
  2354  			acquirep(_p_)
  2355  			if wasSpinning {
  2356  				_g_.m.spinning = true
  2357  				atomic.Xadd(&sched.nmspinning, 1)
  2358  			}
  2359  			// Go back to idle GC check.
  2360  			goto stop
  2361  		}
  2362  	}
  2363  
  2364  	// poll network
  2365  	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
  2366  		if _g_.m.p != 0 {
  2367  			throw("findrunnable: netpoll with p")
  2368  		}
  2369  		if _g_.m.spinning {
  2370  			throw("findrunnable: netpoll with spinning")
  2371  		}
  2372  		list := netpoll(true) // block until new work is available
  2373  		atomic.Store64(&sched.lastpoll, uint64(nanotime()))
  2374  		if !list.empty() {
  2375  			lock(&sched.lock)
  2376  			_p_ = pidleget()
  2377  			unlock(&sched.lock)
  2378  			if _p_ != nil {
  2379  				acquirep(_p_)
  2380  				gp := list.pop()
  2381  				injectglist(&list)
  2382  				casgstatus(gp, _Gwaiting, _Grunnable)
  2383  				if trace.enabled {
  2384  					traceGoUnpark(gp, 0)
  2385  				}
  2386  				return gp, false
  2387  			}
  2388  			injectglist(&list)
  2389  		}
  2390  	}
  2391  	stopm()
  2392  	goto top
  2393  }
  2394  
  2395  // pollWork reports whether there is non-background work this P could
  2396  // be doing. This is a fairly lightweight check to be used for
  2397  // background work loops, like idle GC. It checks a subset of the
  2398  // conditions checked by the actual scheduler.
  2399  func pollWork() bool {
  2400  	if sched.runqsize != 0 {
  2401  		return true
  2402  	}
  2403  	p := getg().m.p.ptr()
  2404  	if !runqempty(p) {
  2405  		return true
  2406  	}
  2407  	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
  2408  		if list := netpoll(false); !list.empty() {
  2409  			injectglist(&list)
  2410  			return true
  2411  		}
  2412  	}
  2413  	return false
  2414  }
  2415  
  2416  func resetspinning() {
  2417  	_g_ := getg()
  2418  	if !_g_.m.spinning {
  2419  		throw("resetspinning: not a spinning m")
  2420  	}
  2421  	_g_.m.spinning = false
  2422  	nmspinning := atomic.Xadd(&sched.nmspinning, -1)
  2423  	if int32(nmspinning) < 0 {
  2424  		throw("findrunnable: negative nmspinning")
  2425  	}
  2426  	// M wakeup policy is deliberately somewhat conservative, so check if we
  2427  	// need to wakeup another P here. See "Worker thread parking/unparking"
  2428  	// comment at the top of the file for details.
  2429  	if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
  2430  		wakep()
  2431  	}
  2432  }
  2433  
  2434  // Injects the list of runnable G's into the scheduler and clears glist.
  2435  // Can run concurrently with GC.
  2436  func injectglist(glist *gList) {
  2437  	if glist.empty() {
  2438  		return
  2439  	}
  2440  	if trace.enabled {
  2441  		for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
  2442  			traceGoUnpark(gp, 0)
  2443  		}
  2444  	}
  2445  	lock(&sched.lock)
  2446  	var n int
  2447  	for n = 0; !glist.empty(); n++ {
  2448  		gp := glist.pop()
  2449  		casgstatus(gp, _Gwaiting, _Grunnable)
  2450  		globrunqput(gp)
  2451  	}
  2452  	unlock(&sched.lock)
  2453  	for ; n != 0 && sched.npidle != 0; n-- {
  2454  		startm(nil, false)
  2455  	}
  2456  	*glist = gList{}
  2457  }
  2458  
  2459  // One round of scheduler: find a runnable goroutine and execute it.
  2460  // Never returns.
  2461  func schedule() {
  2462  	_g_ := getg()
  2463  
  2464  	if _g_.m.locks != 0 {
  2465  		throw("schedule: holding locks")
  2466  	}
  2467  
  2468  	if _g_.m.lockedg != 0 {
  2469  		stoplockedm()
  2470  		execute(_g_.m.lockedg.ptr(), false) // Never returns.
  2471  	}
  2472  
  2473  	// We should not schedule away from a g that is executing a cgo call,
  2474  	// since the cgo call is using the m's g0 stack.
  2475  	if _g_.m.incgo {
  2476  		throw("schedule: in cgo")
  2477  	}
  2478  
  2479  top:
  2480  	if sched.gcwaiting != 0 {
  2481  		gcstopm()
  2482  		goto top
  2483  	}
  2484  	if _g_.m.p.ptr().runSafePointFn != 0 {
  2485  		runSafePointFn()
  2486  	}
  2487  
  2488  	var gp *g
  2489  	var inheritTime bool
  2490  
  2491  	// Normal goroutines will check for need to wakeP in ready,
  2492  	// but GCworkers and tracereaders will not, so the check must
  2493  	// be done here instead.
  2494  	tryWakeP := false
  2495  	if trace.enabled || trace.shutdown {
  2496  		gp = traceReader()
  2497  		if gp != nil {
  2498  			casgstatus(gp, _Gwaiting, _Grunnable)
  2499  			traceGoUnpark(gp, 0)
  2500  			tryWakeP = true
  2501  		}
  2502  	}
  2503  	if gp == nil && gcBlackenEnabled != 0 {
  2504  		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
  2505  		tryWakeP = tryWakeP || gp != nil
  2506  	}
  2507  	if gp == nil {
  2508  		// Check the global runnable queue once in a while to ensure fairness.
  2509  		// Otherwise two goroutines can completely occupy the local runqueue
  2510  		// by constantly respawning each other.
  2511  		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
  2512  			lock(&sched.lock)
  2513  			gp = globrunqget(_g_.m.p.ptr(), 1)
  2514  			unlock(&sched.lock)
  2515  		}
  2516  	}
  2517  	if gp == nil {
  2518  		gp, inheritTime = runqget(_g_.m.p.ptr())
  2519  		if gp != nil && _g_.m.spinning {
  2520  			throw("schedule: spinning with local work")
  2521  		}
  2522  	}
  2523  	if gp == nil {
  2524  		gp, inheritTime = findrunnable() // blocks until work is available
  2525  	}
  2526  
  2527  	// This thread is going to run a goroutine and is not spinning anymore,
  2528  	// so if it was marked as spinning we need to reset it now and potentially
  2529  	// start a new spinning M.
  2530  	if _g_.m.spinning {
  2531  		resetspinning()
  2532  	}
  2533  
  2534  	if sched.disable.user && !schedEnabled(gp) {
  2535  		// Scheduling of this goroutine is disabled. Put it on
  2536  		// the list of pending runnable goroutines for when we
  2537  		// re-enable user scheduling and look again.
  2538  		lock(&sched.lock)
  2539  		if schedEnabled(gp) {
  2540  			// Something re-enabled scheduling while we
  2541  			// were acquiring the lock.
  2542  			unlock(&sched.lock)
  2543  		} else {
  2544  			sched.disable.runnable.pushBack(gp)
  2545  			sched.disable.n++
  2546  			unlock(&sched.lock)
  2547  			goto top
  2548  		}
  2549  	}
  2550  
  2551  	// If about to schedule a not-normal goroutine (a GCworker or tracereader),
  2552  	// wake a P if there is one.
  2553  	if tryWakeP {
  2554  		if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
  2555  			wakep()
  2556  		}
  2557  	}
  2558  	if gp.lockedm != 0 {
  2559  		// Hands off own p to the locked m,
  2560  		// then blocks waiting for a new p.
  2561  		startlockedm(gp)
  2562  		goto top
  2563  	}
  2564  
  2565  	execute(gp, inheritTime)
  2566  }
  2567  
  2568  // dropg removes the association between m and the current goroutine m->curg (gp for short).
  2569  // Typically a caller sets gp's status away from Grunning and then
  2570  // immediately calls dropg to finish the job. The caller is also responsible
  2571  // for arranging that gp will be restarted using ready at an
  2572  // appropriate time. After calling dropg and arranging for gp to be
  2573  // readied later, the caller can do other work but eventually should
  2574  // call schedule to restart the scheduling of goroutines on this m.
  2575  func dropg() {
  2576  	_g_ := getg()
  2577  
  2578  	setMNoWB(&_g_.m.curg.m, nil)
  2579  	setGNoWB(&_g_.m.curg, nil)
  2580  }
  2581  
  2582  func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
  2583  	unlock((*mutex)(lock))
  2584  	return true
  2585  }
  2586  
  2587  // park continuation on g0.
  2588  func park_m(gp *g) {
  2589  	_g_ := getg()
  2590  
  2591  	if trace.enabled {
  2592  		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
  2593  	}
  2594  
  2595  	casgstatus(gp, _Grunning, _Gwaiting)
  2596  	dropg()
  2597  
  2598  	if fn := _g_.m.waitunlockf; fn != nil {
  2599  		ok := fn(gp, _g_.m.waitlock)
  2600  		_g_.m.waitunlockf = nil
  2601  		_g_.m.waitlock = nil
  2602  		if !ok {
  2603  			if trace.enabled {
  2604  				traceGoUnpark(gp, 2)
  2605  			}
  2606  			casgstatus(gp, _Gwaiting, _Grunnable)
  2607  			execute(gp, true) // Schedule it back, never returns.
  2608  		}
  2609  	}
  2610  	schedule()
  2611  }
  2612  
  2613  func goschedImpl(gp *g) {
  2614  	status := readgstatus(gp)
  2615  	if status&^_Gscan != _Grunning {
  2616  		dumpgstatus(gp)
  2617  		throw("bad g status")
  2618  	}
  2619  	casgstatus(gp, _Grunning, _Grunnable)
  2620  	dropg()
  2621  	lock(&sched.lock)
  2622  	globrunqput(gp)
  2623  	unlock(&sched.lock)
  2624  
  2625  	schedule()
  2626  }
  2627  
  2628  // Gosched continuation on g0.
  2629  func gosched_m(gp *g) {
  2630  	if trace.enabled {
  2631  		traceGoSched()
  2632  	}
  2633  	goschedImpl(gp)
  2634  }
  2635  
  2636  // goschedguarded is a forbidden-states-avoided version of gosched_m
  2637  func goschedguarded_m(gp *g) {
  2638  
  2639  	if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
  2640  		gogo(&gp.sched) // never return
  2641  	}
  2642  
  2643  	if trace.enabled {
  2644  		traceGoSched()
  2645  	}
  2646  	goschedImpl(gp)
  2647  }
  2648  
  2649  func gopreempt_m(gp *g) {
  2650  	if trace.enabled {
  2651  		traceGoPreempt()
  2652  	}
  2653  	goschedImpl(gp)
  2654  }
  2655  
  2656  // Finishes execution of the current goroutine.
  2657  func goexit1() {
  2658  	if raceenabled {
  2659  		racegoend()
  2660  	}
  2661  	if trace.enabled {
  2662  		traceGoEnd()
  2663  	}
  2664  	mcall(goexit0)
  2665  }
  2666  
  2667  // goexit continuation on g0.
  2668  func goexit0(gp *g) {
  2669  	_g_ := getg()
  2670  
  2671  	casgstatus(gp, _Grunning, _Gdead)
  2672  	if isSystemGoroutine(gp, false) {
  2673  		atomic.Xadd(&sched.ngsys, -1)
  2674  	}
  2675  	gp.m = nil
  2676  	locked := gp.lockedm != 0
  2677  	gp.lockedm = 0
  2678  	_g_.m.lockedg = 0
  2679  	gp.paniconfault = false
  2680  	gp._defer = nil // should be true already but just in case.
  2681  	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
  2682  	gp.writebuf = nil
  2683  	gp.waitreason = 0
  2684  	gp.param = nil
  2685  	gp.labels = nil
  2686  	gp.timer = nil
  2687  
  2688  	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
  2689  		// Flush assist credit to the global pool. This gives
  2690  		// better information to pacing if the application is
  2691  		// rapidly creating an exiting goroutines.
  2692  		scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
  2693  		atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
  2694  		gp.gcAssistBytes = 0
  2695  	}
  2696  
  2697  	// Note that gp's stack scan is now "valid" because it has no
  2698  	// stack.
  2699  	gp.gcscanvalid = true
  2700  	dropg()
  2701  
  2702  	if GOARCH == "wasm" { // no threads yet on wasm
  2703  		gfput(_g_.m.p.ptr(), gp)
  2704  		schedule() // never returns
  2705  	}
  2706  
  2707  	if _g_.m.lockedInt != 0 {
  2708  		print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
  2709  		throw("internal lockOSThread error")
  2710  	}
  2711  	gfput(_g_.m.p.ptr(), gp)
  2712  	if locked {
  2713  		// The goroutine may have locked this thread because
  2714  		// it put it in an unusual kernel state. Kill it
  2715  		// rather than returning it to the thread pool.
  2716  
  2717  		// Return to mstart, which will release the P and exit
  2718  		// the thread.
  2719  		if GOOS != "plan9" { // See golang.org/issue/22227.
  2720  			gogo(&_g_.m.g0.sched)
  2721  		} else {
  2722  			// Clear lockedExt on plan9 since we may end up re-using
  2723  			// this thread.
  2724  			_g_.m.lockedExt = 0
  2725  		}
  2726  	}
  2727  	schedule()
  2728  }
  2729  
  2730  // save updates getg().sched to refer to pc and sp so that a following
  2731  // gogo will restore pc and sp.
  2732  //
  2733  // save must not have write barriers because invoking a write barrier
  2734  // can clobber getg().sched.
  2735  //
  2736  //go:nosplit
  2737  //go:nowritebarrierrec
  2738  func save(pc, sp uintptr) {
  2739  	_g_ := getg()
  2740  
  2741  	_g_.sched.pc = pc
  2742  	_g_.sched.sp = sp
  2743  	_g_.sched.lr = 0
  2744  	_g_.sched.ret = 0
  2745  	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
  2746  	// We need to ensure ctxt is zero, but can't have a write
  2747  	// barrier here. However, it should always already be zero.
  2748  	// Assert that.
  2749  	if _g_.sched.ctxt != nil {
  2750  		badctxt()
  2751  	}
  2752  }
  2753  
  2754  // The goroutine g is about to enter a system call.
  2755  // Record that it's not using the cpu anymore.
  2756  // This is called only from the go syscall library and cgocall,
  2757  // not from the low-level system calls used by the runtime.
  2758  //
  2759  // Entersyscall cannot split the stack: the gosave must
  2760  // make g->sched refer to the caller's stack segment, because
  2761  // entersyscall is going to return immediately after.
  2762  //
  2763  // Nothing entersyscall calls can split the stack either.
  2764  // We cannot safely move the stack during an active call to syscall,
  2765  // because we do not know which of the uintptr arguments are
  2766  // really pointers (back into the stack).
  2767  // In practice, this means that we make the fast path run through
  2768  // entersyscall doing no-split things, and the slow path has to use systemstack
  2769  // to run bigger things on the system stack.
  2770  //
  2771  // reentersyscall is the entry point used by cgo callbacks, where explicitly
  2772  // saved SP and PC are restored. This is needed when exitsyscall will be called
  2773  // from a function further up in the call stack than the parent, as g->syscallsp
  2774  // must always point to a valid stack frame. entersyscall below is the normal
  2775  // entry point for syscalls, which obtains the SP and PC from the caller.
  2776  //
  2777  // Syscall tracing:
  2778  // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
  2779  // If the syscall does not block, that is it, we do not emit any other events.
  2780  // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
  2781  // when syscall returns we emit traceGoSysExit and when the goroutine starts running
  2782  // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
  2783  // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
  2784  // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
  2785  // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
  2786  // and we wait for the increment before emitting traceGoSysExit.
  2787  // Note that the increment is done even if tracing is not enabled,
  2788  // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
  2789  //
  2790  //go:nosplit
  2791  func reentersyscall(pc, sp uintptr) {
  2792  	_g_ := getg()
  2793  
  2794  	// Disable preemption because during this function g is in Gsyscall status,
  2795  	// but can have inconsistent g->sched, do not let GC observe it.
  2796  	_g_.m.locks++
  2797  
  2798  	// Entersyscall must not call any function that might split/grow the stack.
  2799  	// (See details in comment above.)
  2800  	// Catch calls that might, by replacing the stack guard with something that
  2801  	// will trip any stack check and leaving a flag to tell newstack to die.
  2802  	_g_.stackguard0 = stackPreempt
  2803  	_g_.throwsplit = true
  2804  
  2805  	// Leave SP around for GC and traceback.
  2806  	save(pc, sp)
  2807  	_g_.syscallsp = sp
  2808  	_g_.syscallpc = pc
  2809  	casgstatus(_g_, _Grunning, _Gsyscall)
  2810  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2811  		systemstack(func() {
  2812  			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2813  			throw("entersyscall")
  2814  		})
  2815  	}
  2816  
  2817  	if trace.enabled {
  2818  		systemstack(traceGoSysCall)
  2819  		// systemstack itself clobbers g.sched.{pc,sp} and we might
  2820  		// need them later when the G is genuinely blocked in a
  2821  		// syscall
  2822  		save(pc, sp)
  2823  	}
  2824  
  2825  	if atomic.Load(&sched.sysmonwait) != 0 {
  2826  		systemstack(entersyscall_sysmon)
  2827  		save(pc, sp)
  2828  	}
  2829  
  2830  	if _g_.m.p.ptr().runSafePointFn != 0 {
  2831  		// runSafePointFn may stack split if run on this stack
  2832  		systemstack(runSafePointFn)
  2833  		save(pc, sp)
  2834  	}
  2835  
  2836  	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
  2837  	_g_.sysblocktraced = true
  2838  	_g_.m.mcache = nil
  2839  	pp := _g_.m.p.ptr()
  2840  	pp.m = 0
  2841  	_g_.m.oldp.set(pp)
  2842  	_g_.m.p = 0
  2843  	atomic.Store(&pp.status, _Psyscall)
  2844  	if sched.gcwaiting != 0 {
  2845  		systemstack(entersyscall_gcwait)
  2846  		save(pc, sp)
  2847  	}
  2848  
  2849  	_g_.m.locks--
  2850  }
  2851  
  2852  // Standard syscall entry used by the go syscall library and normal cgo calls.
  2853  //
  2854  // This is exported via linkname to assembly in the syscall package.
  2855  //
  2856  //go:nosplit
  2857  //go:linkname entersyscall
  2858  func entersyscall() {
  2859  	reentersyscall(getcallerpc(), getcallersp())
  2860  }
  2861  
  2862  func entersyscall_sysmon() {
  2863  	lock(&sched.lock)
  2864  	if atomic.Load(&sched.sysmonwait) != 0 {
  2865  		atomic.Store(&sched.sysmonwait, 0)
  2866  		notewakeup(&sched.sysmonnote)
  2867  	}
  2868  	unlock(&sched.lock)
  2869  }
  2870  
  2871  func entersyscall_gcwait() {
  2872  	_g_ := getg()
  2873  	_p_ := _g_.m.oldp.ptr()
  2874  
  2875  	lock(&sched.lock)
  2876  	if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
  2877  		if trace.enabled {
  2878  			traceGoSysBlock(_p_)
  2879  			traceProcStop(_p_)
  2880  		}
  2881  		_p_.syscalltick++
  2882  		if sched.stopwait--; sched.stopwait == 0 {
  2883  			notewakeup(&sched.stopnote)
  2884  		}
  2885  	}
  2886  	unlock(&sched.lock)
  2887  }
  2888  
  2889  // The same as entersyscall(), but with a hint that the syscall is blocking.
  2890  //go:nosplit
  2891  func entersyscallblock() {
  2892  	_g_ := getg()
  2893  
  2894  	_g_.m.locks++ // see comment in entersyscall
  2895  	_g_.throwsplit = true
  2896  	_g_.stackguard0 = stackPreempt // see comment in entersyscall
  2897  	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
  2898  	_g_.sysblocktraced = true
  2899  	_g_.m.p.ptr().syscalltick++
  2900  
  2901  	// Leave SP around for GC and traceback.
  2902  	pc := getcallerpc()
  2903  	sp := getcallersp()
  2904  	save(pc, sp)
  2905  	_g_.syscallsp = _g_.sched.sp
  2906  	_g_.syscallpc = _g_.sched.pc
  2907  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2908  		sp1 := sp
  2909  		sp2 := _g_.sched.sp
  2910  		sp3 := _g_.syscallsp
  2911  		systemstack(func() {
  2912  			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2913  			throw("entersyscallblock")
  2914  		})
  2915  	}
  2916  	casgstatus(_g_, _Grunning, _Gsyscall)
  2917  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2918  		systemstack(func() {
  2919  			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2920  			throw("entersyscallblock")
  2921  		})
  2922  	}
  2923  
  2924  	systemstack(entersyscallblock_handoff)
  2925  
  2926  	// Resave for traceback during blocked call.
  2927  	save(getcallerpc(), getcallersp())
  2928  
  2929  	_g_.m.locks--
  2930  }
  2931  
  2932  func entersyscallblock_handoff() {
  2933  	if trace.enabled {
  2934  		traceGoSysCall()
  2935  		traceGoSysBlock(getg().m.p.ptr())
  2936  	}
  2937  	handoffp(releasep())
  2938  }
  2939  
  2940  // The goroutine g exited its system call.
  2941  // Arrange for it to run on a cpu again.
  2942  // This is called only from the go syscall library, not
  2943  // from the low-level system calls used by the runtime.
  2944  //
  2945  // Write barriers are not allowed because our P may have been stolen.
  2946  //
  2947  // This is exported via linkname to assembly in the syscall package.
  2948  //
  2949  //go:nosplit
  2950  //go:nowritebarrierrec
  2951  //go:linkname exitsyscall
  2952  func exitsyscall() {
  2953  	_g_ := getg()
  2954  
  2955  	_g_.m.locks++ // see comment in entersyscall
  2956  	if getcallersp() > _g_.syscallsp {
  2957  		throw("exitsyscall: syscall frame is no longer valid")
  2958  	}
  2959  
  2960  	_g_.waitsince = 0
  2961  	oldp := _g_.m.oldp.ptr()
  2962  	_g_.m.oldp = 0
  2963  	if exitsyscallfast(oldp) {
  2964  		if _g_.m.mcache == nil {
  2965  			throw("lost mcache")
  2966  		}
  2967  		if trace.enabled {
  2968  			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
  2969  				systemstack(traceGoStart)
  2970  			}
  2971  		}
  2972  		// There's a cpu for us, so we can run.
  2973  		_g_.m.p.ptr().syscalltick++
  2974  		// We need to cas the status and scan before resuming...
  2975  		casgstatus(_g_, _Gsyscall, _Grunning)
  2976  
  2977  		// Garbage collector isn't running (since we are),
  2978  		// so okay to clear syscallsp.
  2979  		_g_.syscallsp = 0
  2980  		_g_.m.locks--
  2981  		if _g_.preempt {
  2982  			// restore the preemption request in case we've cleared it in newstack
  2983  			_g_.stackguard0 = stackPreempt
  2984  		} else {
  2985  			// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
  2986  			_g_.stackguard0 = _g_.stack.lo + _StackGuard
  2987  		}
  2988  		_g_.throwsplit = false
  2989  
  2990  		if sched.disable.user && !schedEnabled(_g_) {
  2991  			// Scheduling of this goroutine is disabled.
  2992  			Gosched()
  2993  		}
  2994  
  2995  		return
  2996  	}
  2997  
  2998  	_g_.sysexitticks = 0
  2999  	if trace.enabled {
  3000  		// Wait till traceGoSysBlock event is emitted.
  3001  		// This ensures consistency of the trace (the goroutine is started after it is blocked).
  3002  		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
  3003  			osyield()
  3004  		}
  3005  		// We can't trace syscall exit right now because we don't have a P.
  3006  		// Tracing code can invoke write barriers that cannot run without a P.
  3007  		// So instead we remember the syscall exit time and emit the event
  3008  		// in execute when we have a P.
  3009  		_g_.sysexitticks = cputicks()
  3010  	}
  3011  
  3012  	_g_.m.locks--
  3013  
  3014  	// Call the scheduler.
  3015  	mcall(exitsyscall0)
  3016  
  3017  	if _g_.m.mcache == nil {
  3018  		throw("lost mcache")
  3019  	}
  3020  
  3021  	// Scheduler returned, so we're allowed to run now.
  3022  	// Delete the syscallsp information that we left for
  3023  	// the garbage collector during the system call.
  3024  	// Must wait until now because until gosched returns
  3025  	// we don't know for sure that the garbage collector
  3026  	// is not running.
  3027  	_g_.syscallsp = 0
  3028  	_g_.m.p.ptr().syscalltick++
  3029  	_g_.throwsplit = false
  3030  }
  3031  
  3032  //go:nosplit
  3033  func exitsyscallfast(oldp *p) bool {
  3034  	_g_ := getg()
  3035  
  3036  	// Freezetheworld sets stopwait but does not retake P's.
  3037  	if sched.stopwait == freezeStopWait {
  3038  		return false
  3039  	}
  3040  
  3041  	// Try to re-acquire the last P.
  3042  	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
  3043  		// There's a cpu for us, so we can run.
  3044  		wirep(oldp)
  3045  		exitsyscallfast_reacquired()
  3046  		return true
  3047  	}
  3048  
  3049  	// Try to get any other idle P.
  3050  	if sched.pidle != 0 {
  3051  		var ok bool
  3052  		systemstack(func() {
  3053  			ok = exitsyscallfast_pidle()
  3054  			if ok && trace.enabled {
  3055  				if oldp != nil {
  3056  					// Wait till traceGoSysBlock event is emitted.
  3057  					// This ensures consistency of the trace (the goroutine is started after it is blocked).
  3058  					for oldp.syscalltick == _g_.m.syscalltick {
  3059  						osyield()
  3060  					}
  3061  				}
  3062  				traceGoSysExit(0)
  3063  			}
  3064  		})
  3065  		if ok {
  3066  			return true
  3067  		}
  3068  	}
  3069  	return false
  3070  }
  3071  
  3072  // exitsyscallfast_reacquired is the exitsyscall path on which this G
  3073  // has successfully reacquired the P it was running on before the
  3074  // syscall.
  3075  //
  3076  //go:nosplit
  3077  func exitsyscallfast_reacquired() {
  3078  	_g_ := getg()
  3079  	if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
  3080  		if trace.enabled {
  3081  			// The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
  3082  			// traceGoSysBlock for this syscall was already emitted,
  3083  			// but here we effectively retake the p from the new syscall running on the same p.
  3084  			systemstack(func() {
  3085  				// Denote blocking of the new syscall.
  3086  				traceGoSysBlock(_g_.m.p.ptr())
  3087  				// Denote completion of the current syscall.
  3088  				traceGoSysExit(0)
  3089  			})
  3090  		}
  3091  		_g_.m.p.ptr().syscalltick++
  3092  	}
  3093  }
  3094  
  3095  func exitsyscallfast_pidle() bool {
  3096  	lock(&sched.lock)
  3097  	_p_ := pidleget()
  3098  	if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
  3099  		atomic.Store(&sched.sysmonwait, 0)
  3100  		notewakeup(&sched.sysmonnote)
  3101  	}
  3102  	unlock(&sched.lock)
  3103  	if _p_ != nil {
  3104  		acquirep(_p_)
  3105  		return true
  3106  	}
  3107  	return false
  3108  }
  3109  
  3110  // exitsyscall slow path on g0.
  3111  // Failed to acquire P, enqueue gp as runnable.
  3112  //
  3113  //go:nowritebarrierrec
  3114  func exitsyscall0(gp *g) {
  3115  	_g_ := getg()
  3116  
  3117  	casgstatus(gp, _Gsyscall, _Grunnable)
  3118  	dropg()
  3119  	lock(&sched.lock)
  3120  	var _p_ *p
  3121  	if schedEnabled(_g_) {
  3122  		_p_ = pidleget()
  3123  	}
  3124  	if _p_ == nil {
  3125  		globrunqput(gp)
  3126  	} else if atomic.Load(&sched.sysmonwait) != 0 {
  3127  		atomic.Store(&sched.sysmonwait, 0)
  3128  		notewakeup(&sched.sysmonnote)
  3129  	}
  3130  	unlock(&sched.lock)
  3131  	if _p_ != nil {
  3132  		acquirep(_p_)
  3133  		execute(gp, false) // Never returns.
  3134  	}
  3135  	if _g_.m.lockedg != 0 {
  3136  		// Wait until another thread schedules gp and so m again.
  3137  		stoplockedm()
  3138  		execute(gp, false) // Never returns.
  3139  	}
  3140  	stopm()
  3141  	schedule() // Never returns.
  3142  }
  3143  
  3144  func beforefork() {
  3145  	gp := getg().m.curg
  3146  
  3147  	// Block signals during a fork, so that the child does not run
  3148  	// a signal handler before exec if a signal is sent to the process
  3149  	// group. See issue #18600.
  3150  	gp.m.locks++
  3151  	msigsave(gp.m)
  3152  	sigblock()
  3153  
  3154  	// This function is called before fork in syscall package.
  3155  	// Code between fork and exec must not allocate memory nor even try to grow stack.
  3156  	// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
  3157  	// runtime_AfterFork will undo this in parent process, but not in child.
  3158  	gp.stackguard0 = stackFork
  3159  }
  3160  
  3161  // Called from syscall package before fork.
  3162  //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
  3163  //go:nosplit
  3164  func syscall_runtime_BeforeFork() {
  3165  	systemstack(beforefork)
  3166  }
  3167  
  3168  func afterfork() {
  3169  	gp := getg().m.curg
  3170  
  3171  	// See the comments in beforefork.
  3172  	gp.stackguard0 = gp.stack.lo + _StackGuard
  3173  
  3174  	msigrestore(gp.m.sigmask)
  3175  
  3176  	gp.m.locks--
  3177  }
  3178  
  3179  // Called from syscall package after fork in parent.
  3180  //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
  3181  //go:nosplit
  3182  func syscall_runtime_AfterFork() {
  3183  	systemstack(afterfork)
  3184  }
  3185  
  3186  // inForkedChild is true while manipulating signals in the child process.
  3187  // This is used to avoid calling libc functions in case we are using vfork.
  3188  var inForkedChild bool
  3189  
  3190  // Called from syscall package after fork in child.
  3191  // It resets non-sigignored signals to the default handler, and
  3192  // restores the signal mask in preparation for the exec.
  3193  //
  3194  // Because this might be called during a vfork, and therefore may be
  3195  // temporarily sharing address space with the parent process, this must
  3196  // not change any global variables or calling into C code that may do so.
  3197  //
  3198  //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
  3199  //go:nosplit
  3200  //go:nowritebarrierrec
  3201  func syscall_runtime_AfterForkInChild() {
  3202  	// It's OK to change the global variable inForkedChild here
  3203  	// because we are going to change it back. There is no race here,
  3204  	// because if we are sharing address space with the parent process,
  3205  	// then the parent process can not be running concurrently.
  3206  	inForkedChild = true
  3207  
  3208  	clearSignalHandlers()
  3209  
  3210  	// When we are the child we are the only thread running,
  3211  	// so we know that nothing else has changed gp.m.sigmask.
  3212  	msigrestore(getg().m.sigmask)
  3213  
  3214  	inForkedChild = false
  3215  }
  3216  
  3217  // Called from syscall package before Exec.
  3218  //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
  3219  func syscall_runtime_BeforeExec() {
  3220  	// Prevent thread creation during exec.
  3221  	execLock.lock()
  3222  }
  3223  
  3224  // Called from syscall package after Exec.
  3225  //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
  3226  func syscall_runtime_AfterExec() {
  3227  	execLock.unlock()
  3228  }
  3229  
  3230  // Allocate a new g, with a stack big enough for stacksize bytes.
  3231  func malg(stacksize int32) *g {
  3232  	newg := new(g)
  3233  	if stacksize >= 0 {
  3234  		stacksize = round2(_StackSystem + stacksize)
  3235  		systemstack(func() {
  3236  			newg.stack = stackalloc(uint32(stacksize))
  3237  		})
  3238  		newg.stackguard0 = newg.stack.lo + _StackGuard
  3239  		newg.stackguard1 = ^uintptr(0)
  3240  	}
  3241  	return newg
  3242  }
  3243  
  3244  // Create a new g running fn with siz bytes of arguments.
  3245  // Put it on the queue of g's waiting to run.
  3246  // The compiler turns a go statement into a call to this.
  3247  // Cannot split the stack because it assumes that the arguments
  3248  // are available sequentially after &fn; they would not be
  3249  // copied if a stack split occurred.
  3250  //go:nosplit
  3251  func newproc(siz int32, fn *funcval) {
  3252  	argp := add(unsafe.Pointer(&fn), sys.PtrSize)
  3253  	gp := getg()
  3254  	pc := getcallerpc()
  3255  	systemstack(func() {
  3256  		newproc1(fn, (*uint8)(argp), siz, gp, pc)
  3257  	})
  3258  }
  3259  
  3260  // Create a new g running fn with narg bytes of arguments starting
  3261  // at argp. callerpc is the address of the go statement that created
  3262  // this. The new g is put on the queue of g's waiting to run.
  3263  func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) {
  3264  	_g_ := getg()
  3265  
  3266  	if fn == nil {
  3267  		_g_.m.throwing = -1 // do not dump full stacks
  3268  		throw("go of nil func value")
  3269  	}
  3270  	acquirem() // disable preemption because it can be holding p in a local var
  3271  	siz := narg
  3272  	siz = (siz + 7) &^ 7
  3273  
  3274  	// We could allocate a larger initial stack if necessary.
  3275  	// Not worth it: this is almost always an error.
  3276  	// 4*sizeof(uintreg): extra space added below
  3277  	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
  3278  	if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
  3279  		throw("newproc: function arguments too large for new goroutine")
  3280  	}
  3281  
  3282  	_p_ := _g_.m.p.ptr()
  3283  	newg := gfget(_p_)
  3284  	if newg == nil {
  3285  		newg = malg(_StackMin)
  3286  		casgstatus(newg, _Gidle, _Gdead)
  3287  		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
  3288  	}
  3289  	if newg.stack.hi == 0 {
  3290  		throw("newproc1: newg missing stack")
  3291  	}
  3292  
  3293  	if readgstatus(newg) != _Gdead {
  3294  		throw("newproc1: new g is not Gdead")
  3295  	}
  3296  
  3297  	totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
  3298  	totalSize += -totalSize & (sys.SpAlign - 1)                  // align to spAlign
  3299  	sp := newg.stack.hi - totalSize
  3300  	spArg := sp
  3301  	if usesLR {
  3302  		// caller's LR
  3303  		*(*uintptr)(unsafe.Pointer(sp)) = 0
  3304  		prepGoExitFrame(sp)
  3305  		spArg += sys.MinFrameSize
  3306  	}
  3307  	if narg > 0 {
  3308  		memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
  3309  		// This is a stack-to-stack copy. If write barriers
  3310  		// are enabled and the source stack is grey (the
  3311  		// destination is always black), then perform a
  3312  		// barrier copy. We do this *after* the memmove
  3313  		// because the destination stack may have garbage on
  3314  		// it.
  3315  		if writeBarrier.needed && !_g_.m.curg.gcscandone {
  3316  			f := findfunc(fn.fn)
  3317  			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  3318  			if stkmap.nbit > 0 {
  3319  				// We're in the prologue, so it's always stack map index 0.
  3320  				bv := stackmapdata(stkmap, 0)
  3321  				bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
  3322  			}
  3323  		}
  3324  	}
  3325  
  3326  	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
  3327  	newg.sched.sp = sp
  3328  	newg.stktopsp = sp
  3329  	newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
  3330  	newg.sched.g = guintptr(unsafe.Pointer(newg))
  3331  	gostartcallfn(&newg.sched, fn)
  3332  	newg.gopc = callerpc
  3333  	newg.ancestors = saveAncestors(callergp)
  3334  	newg.startpc = fn.fn
  3335  	if _g_.m.curg != nil {
  3336  		newg.labels = _g_.m.curg.labels
  3337  	}
  3338  	if isSystemGoroutine(newg, false) {
  3339  		atomic.Xadd(&sched.ngsys, +1)
  3340  	}
  3341  	newg.gcscanvalid = false
  3342  	casgstatus(newg, _Gdead, _Grunnable)
  3343  
  3344  	if _p_.goidcache == _p_.goidcacheend {
  3345  		// Sched.goidgen is the last allocated id,
  3346  		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
  3347  		// At startup sched.goidgen=0, so main goroutine receives goid=1.
  3348  		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
  3349  		_p_.goidcache -= _GoidCacheBatch - 1
  3350  		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
  3351  	}
  3352  	newg.goid = int64(_p_.goidcache)
  3353  	_p_.goidcache++
  3354  	if raceenabled {
  3355  		newg.racectx = racegostart(callerpc)
  3356  	}
  3357  	if trace.enabled {
  3358  		traceGoCreate(newg, newg.startpc)
  3359  	}
  3360  	runqput(_p_, newg, true)
  3361  
  3362  	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
  3363  		wakep()
  3364  	}
  3365  	releasem(_g_.m)
  3366  }
  3367  
  3368  // saveAncestors copies previous ancestors of the given caller g and
  3369  // includes infor for the current caller into a new set of tracebacks for
  3370  // a g being created.
  3371  func saveAncestors(callergp *g) *[]ancestorInfo {
  3372  	// Copy all prior info, except for the root goroutine (goid 0).
  3373  	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
  3374  		return nil
  3375  	}
  3376  	var callerAncestors []ancestorInfo
  3377  	if callergp.ancestors != nil {
  3378  		callerAncestors = *callergp.ancestors
  3379  	}
  3380  	n := int32(len(callerAncestors)) + 1
  3381  	if n > debug.tracebackancestors {
  3382  		n = debug.tracebackancestors
  3383  	}
  3384  	ancestors := make([]ancestorInfo, n)
  3385  	copy(ancestors[1:], callerAncestors)
  3386  
  3387  	var pcs [_TracebackMaxFrames]uintptr
  3388  	npcs := gcallers(callergp, 0, pcs[:])
  3389  	ipcs := make([]uintptr, npcs)
  3390  	copy(ipcs, pcs[:])
  3391  	ancestors[0] = ancestorInfo{
  3392  		pcs:  ipcs,
  3393  		goid: callergp.goid,
  3394  		gopc: callergp.gopc,
  3395  	}
  3396  
  3397  	ancestorsp := new([]ancestorInfo)
  3398  	*ancestorsp = ancestors
  3399  	return ancestorsp
  3400  }
  3401  
  3402  // Put on gfree list.
  3403  // If local list is too long, transfer a batch to the global list.
  3404  func gfput(_p_ *p, gp *g) {
  3405  	if readgstatus(gp) != _Gdead {
  3406  		throw("gfput: bad status (not Gdead)")
  3407  	}
  3408  
  3409  	stksize := gp.stack.hi - gp.stack.lo
  3410  
  3411  	if stksize != _FixedStack {
  3412  		// non-standard stack size - free it.
  3413  		stackfree(gp.stack)
  3414  		gp.stack.lo = 0
  3415  		gp.stack.hi = 0
  3416  		gp.stackguard0 = 0
  3417  	}
  3418  
  3419  	_p_.gFree.push(gp)
  3420  	_p_.gFree.n++
  3421  	if _p_.gFree.n >= 64 {
  3422  		lock(&sched.gFree.lock)
  3423  		for _p_.gFree.n >= 32 {
  3424  			_p_.gFree.n--
  3425  			gp = _p_.gFree.pop()
  3426  			if gp.stack.lo == 0 {
  3427  				sched.gFree.noStack.push(gp)
  3428  			} else {
  3429  				sched.gFree.stack.push(gp)
  3430  			}
  3431  			sched.gFree.n++
  3432  		}
  3433  		unlock(&sched.gFree.lock)
  3434  	}
  3435  }
  3436  
  3437  // Get from gfree list.
  3438  // If local list is empty, grab a batch from global list.
  3439  func gfget(_p_ *p) *g {
  3440  retry:
  3441  	if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
  3442  		lock(&sched.gFree.lock)
  3443  		// Move a batch of free Gs to the P.
  3444  		for _p_.gFree.n < 32 {
  3445  			// Prefer Gs with stacks.
  3446  			gp := sched.gFree.stack.pop()
  3447  			if gp == nil {
  3448  				gp = sched.gFree.noStack.pop()
  3449  				if gp == nil {
  3450  					break
  3451  				}
  3452  			}
  3453  			sched.gFree.n--
  3454  			_p_.gFree.push(gp)
  3455  			_p_.gFree.n++
  3456  		}
  3457  		unlock(&sched.gFree.lock)
  3458  		goto retry
  3459  	}
  3460  	gp := _p_.gFree.pop()
  3461  	if gp == nil {
  3462  		return nil
  3463  	}
  3464  	_p_.gFree.n--
  3465  	if gp.stack.lo == 0 {
  3466  		// Stack was deallocated in gfput. Allocate a new one.
  3467  		systemstack(func() {
  3468  			gp.stack = stackalloc(_FixedStack)
  3469  		})
  3470  		gp.stackguard0 = gp.stack.lo + _StackGuard
  3471  	} else {
  3472  		if raceenabled {
  3473  			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
  3474  		}
  3475  		if msanenabled {
  3476  			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
  3477  		}
  3478  	}
  3479  	return gp
  3480  }
  3481  
  3482  // Purge all cached G's from gfree list to the global list.
  3483  func gfpurge(_p_ *p) {
  3484  	lock(&sched.gFree.lock)
  3485  	for !_p_.gFree.empty() {
  3486  		gp := _p_.gFree.pop()
  3487  		_p_.gFree.n--
  3488  		if gp.stack.lo == 0 {
  3489  			sched.gFree.noStack.push(gp)
  3490  		} else {
  3491  			sched.gFree.stack.push(gp)
  3492  		}
  3493  		sched.gFree.n++
  3494  	}
  3495  	unlock(&sched.gFree.lock)
  3496  }
  3497  
  3498  // Breakpoint executes a breakpoint trap.
  3499  func Breakpoint() {
  3500  	breakpoint()
  3501  }
  3502  
  3503  // dolockOSThread is called by LockOSThread and lockOSThread below
  3504  // after they modify m.locked. Do not allow preemption during this call,
  3505  // or else the m might be different in this function than in the caller.
  3506  //go:nosplit
  3507  func dolockOSThread() {
  3508  	if GOARCH == "wasm" {
  3509  		return // no threads on wasm yet
  3510  	}
  3511  	_g_ := getg()
  3512  	_g_.m.lockedg.set(_g_)
  3513  	_g_.lockedm.set(_g_.m)
  3514  }
  3515  
  3516  //go:nosplit
  3517  
  3518  // LockOSThread wires the calling goroutine to its current operating system thread.
  3519  // The calling goroutine will always execute in that thread,
  3520  // and no other goroutine will execute in it,
  3521  // until the calling goroutine has made as many calls to
  3522  // UnlockOSThread as to LockOSThread.
  3523  // If the calling goroutine exits without unlocking the thread,
  3524  // the thread will be terminated.
  3525  //
  3526  // All init functions are run on the startup thread. Calling LockOSThread
  3527  // from an init function will cause the main function to be invoked on
  3528  // that thread.
  3529  //
  3530  // A goroutine should call LockOSThread before calling OS services or
  3531  // non-Go library functions that depend on per-thread state.
  3532  func LockOSThread() {
  3533  	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
  3534  		// If we need to start a new thread from the locked
  3535  		// thread, we need the template thread. Start it now
  3536  		// while we're in a known-good state.
  3537  		startTemplateThread()
  3538  	}
  3539  	_g_ := getg()
  3540  	_g_.m.lockedExt++
  3541  	if _g_.m.lockedExt == 0 {
  3542  		_g_.m.lockedExt--
  3543  		panic("LockOSThread nesting overflow")
  3544  	}
  3545  	dolockOSThread()
  3546  }
  3547  
  3548  //go:nosplit
  3549  func lockOSThread() {
  3550  	getg().m.lockedInt++
  3551  	dolockOSThread()
  3552  }
  3553  
  3554  // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
  3555  // after they update m->locked. Do not allow preemption during this call,
  3556  // or else the m might be in different in this function than in the caller.
  3557  //go:nosplit
  3558  func dounlockOSThread() {
  3559  	if GOARCH == "wasm" {
  3560  		return // no threads on wasm yet
  3561  	}
  3562  	_g_ := getg()
  3563  	if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
  3564  		return
  3565  	}
  3566  	_g_.m.lockedg = 0
  3567  	_g_.lockedm = 0
  3568  }
  3569  
  3570  //go:nosplit
  3571  
  3572  // UnlockOSThread undoes an earlier call to LockOSThread.
  3573  // If this drops the number of active LockOSThread calls on the
  3574  // calling goroutine to zero, it unwires the calling goroutine from
  3575  // its fixed operating system thread.
  3576  // If there are no active LockOSThread calls, this is a no-op.
  3577  //
  3578  // Before calling UnlockOSThread, the caller must ensure that the OS
  3579  // thread is suitable for running other goroutines. If the caller made
  3580  // any permanent changes to the state of the thread that would affect
  3581  // other goroutines, it should not call this function and thus leave
  3582  // the goroutine locked to the OS thread until the goroutine (and
  3583  // hence the thread) exits.
  3584  func UnlockOSThread() {
  3585  	_g_ := getg()
  3586  	if _g_.m.lockedExt == 0 {
  3587  		return
  3588  	}
  3589  	_g_.m.lockedExt--
  3590  	dounlockOSThread()
  3591  }
  3592  
  3593  //go:nosplit
  3594  func unlockOSThread() {
  3595  	_g_ := getg()
  3596  	if _g_.m.lockedInt == 0 {
  3597  		systemstack(badunlockosthread)
  3598  	}
  3599  	_g_.m.lockedInt--
  3600  	dounlockOSThread()
  3601  }
  3602  
  3603  func badunlockosthread() {
  3604  	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
  3605  }
  3606  
  3607  func gcount() int32 {
  3608  	n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
  3609  	for _, _p_ := range allp {
  3610  		n -= _p_.gFree.n
  3611  	}
  3612  
  3613  	// All these variables can be changed concurrently, so the result can be inconsistent.
  3614  	// But at least the current goroutine is running.
  3615  	if n < 1 {
  3616  		n = 1
  3617  	}
  3618  	return n
  3619  }
  3620  
  3621  func mcount() int32 {
  3622  	return int32(sched.mnext - sched.nmfreed)
  3623  }
  3624  
  3625  var prof struct {
  3626  	signalLock uint32
  3627  	hz         int32
  3628  }
  3629  
  3630  func _System()                    { _System() }
  3631  func _ExternalCode()              { _ExternalCode() }
  3632  func _LostExternalCode()          { _LostExternalCode() }
  3633  func _GC()                        { _GC() }
  3634  func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
  3635  func _VDSO()                      { _VDSO() }
  3636  
  3637  // Called if we receive a SIGPROF signal.
  3638  // Called by the signal handler, may run during STW.
  3639  //go:nowritebarrierrec
  3640  func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
  3641  	if prof.hz == 0 {
  3642  		return
  3643  	}
  3644  
  3645  	// On mips{,le}, 64bit atomics are emulated with spinlocks, in
  3646  	// runtime/internal/atomic. If SIGPROF arrives while the program is inside
  3647  	// the critical section, it creates a deadlock (when writing the sample).
  3648  	// As a workaround, create a counter of SIGPROFs while in critical section
  3649  	// to store the count, and pass it to sigprof.add() later when SIGPROF is
  3650  	// received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc).
  3651  	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
  3652  		if f := findfunc(pc); f.valid() {
  3653  			if hasPrefix(funcname(f), "runtime/internal/atomic") {
  3654  				cpuprof.lostAtomic++
  3655  				return
  3656  			}
  3657  		}
  3658  	}
  3659  
  3660  	// Profiling runs concurrently with GC, so it must not allocate.
  3661  	// Set a trap in case the code does allocate.
  3662  	// Note that on windows, one thread takes profiles of all the
  3663  	// other threads, so mp is usually not getg().m.
  3664  	// In fact mp may not even be stopped.
  3665  	// See golang.org/issue/17165.
  3666  	getg().m.mallocing++
  3667  
  3668  	// Define that a "user g" is a user-created goroutine, and a "system g"
  3669  	// is one that is m->g0 or m->gsignal.
  3670  	//
  3671  	// We might be interrupted for profiling halfway through a
  3672  	// goroutine switch. The switch involves updating three (or four) values:
  3673  	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
  3674  	// because once it gets updated the new g is running.
  3675  	//
  3676  	// When switching from a user g to a system g, LR is not considered live,
  3677  	// so the update only affects g, SP, and PC. Since PC must be last, there
  3678  	// the possible partial transitions in ordinary execution are (1) g alone is updated,
  3679  	// (2) both g and SP are updated, and (3) SP alone is updated.
  3680  	// If SP or g alone is updated, we can detect the partial transition by checking
  3681  	// whether the SP is within g's stack bounds. (We could also require that SP
  3682  	// be changed only after g, but the stack bounds check is needed by other
  3683  	// cases, so there is no need to impose an additional requirement.)
  3684  	//
  3685  	// There is one exceptional transition to a system g, not in ordinary execution.
  3686  	// When a signal arrives, the operating system starts the signal handler running
  3687  	// with an updated PC and SP. The g is updated last, at the beginning of the
  3688  	// handler. There are two reasons this is okay. First, until g is updated the
  3689  	// g and SP do not match, so the stack bounds check detects the partial transition.
  3690  	// Second, signal handlers currently run with signals disabled, so a profiling
  3691  	// signal cannot arrive during the handler.
  3692  	//
  3693  	// When switching from a system g to a user g, there are three possibilities.
  3694  	//
  3695  	// First, it may be that the g switch has no PC update, because the SP
  3696  	// either corresponds to a user g throughout (as in asmcgocall)
  3697  	// or because it has been arranged to look like a user g frame
  3698  	// (as in cgocallback_gofunc). In this case, since the entire
  3699  	// transition is a g+SP update, a partial transition updating just one of
  3700  	// those will be detected by the stack bounds check.
  3701  	//
  3702  	// Second, when returning from a signal handler, the PC and SP updates
  3703  	// are performed by the operating system in an atomic update, so the g
  3704  	// update must be done before them. The stack bounds check detects
  3705  	// the partial transition here, and (again) signal handlers run with signals
  3706  	// disabled, so a profiling signal cannot arrive then anyway.
  3707  	//
  3708  	// Third, the common case: it may be that the switch updates g, SP, and PC
  3709  	// separately. If the PC is within any of the functions that does this,
  3710  	// we don't ask for a traceback. C.F. the function setsSP for more about this.
  3711  	//
  3712  	// There is another apparently viable approach, recorded here in case
  3713  	// the "PC within setsSP function" check turns out not to be usable.
  3714  	// It would be possible to delay the update of either g or SP until immediately
  3715  	// before the PC update instruction. Then, because of the stack bounds check,
  3716  	// the only problematic interrupt point is just before that PC update instruction,
  3717  	// and the sigprof handler can detect that instruction and simulate stepping past
  3718  	// it in order to reach a consistent state. On ARM, the update of g must be made
  3719  	// in two places (in R10 and also in a TLS slot), so the delayed update would
  3720  	// need to be the SP update. The sigprof handler must read the instruction at
  3721  	// the current PC and if it was the known instruction (for example, JMP BX or
  3722  	// MOV R2, PC), use that other register in place of the PC value.
  3723  	// The biggest drawback to this solution is that it requires that we can tell
  3724  	// whether it's safe to read from the memory pointed at by PC.
  3725  	// In a correct program, we can test PC == nil and otherwise read,
  3726  	// but if a profiling signal happens at the instant that a program executes
  3727  	// a bad jump (before the program manages to handle the resulting fault)
  3728  	// the profiling handler could fault trying to read nonexistent memory.
  3729  	//
  3730  	// To recap, there are no constraints on the assembly being used for the
  3731  	// transition. We simply require that g and SP match and that the PC is not
  3732  	// in gogo.
  3733  	traceback := true
  3734  	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) {
  3735  		traceback = false
  3736  	}
  3737  	var stk [maxCPUProfStack]uintptr
  3738  	n := 0
  3739  	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
  3740  		cgoOff := 0
  3741  		// Check cgoCallersUse to make sure that we are not
  3742  		// interrupting other code that is fiddling with
  3743  		// cgoCallers.  We are running in a signal handler
  3744  		// with all signals blocked, so we don't have to worry
  3745  		// about any other code interrupting us.
  3746  		if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
  3747  			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
  3748  				cgoOff++
  3749  			}
  3750  			copy(stk[:], mp.cgoCallers[:cgoOff])
  3751  			mp.cgoCallers[0] = 0
  3752  		}
  3753  
  3754  		// Collect Go stack that leads to the cgo call.
  3755  		n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
  3756  		if n > 0 {
  3757  			n += cgoOff
  3758  		}
  3759  	} else if traceback {
  3760  		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
  3761  	}
  3762  
  3763  	if n <= 0 {
  3764  		// Normal traceback is impossible or has failed.
  3765  		// See if it falls into several common cases.
  3766  		n = 0
  3767  		if (GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "darwin" || GOOS == "aix") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
  3768  			// Libcall, i.e. runtime syscall on windows.
  3769  			// Collect Go stack that leads to the call.
  3770  			n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
  3771  		}
  3772  		if n == 0 && mp != nil && mp.vdsoSP != 0 {
  3773  			n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
  3774  		}
  3775  		if n == 0 {
  3776  			// If all of the above has failed, account it against abstract "System" or "GC".
  3777  			n = 2
  3778  			if inVDSOPage(pc) {
  3779  				pc = funcPC(_VDSO) + sys.PCQuantum
  3780  			} else if pc > firstmoduledata.etext {
  3781  				// "ExternalCode" is better than "etext".
  3782  				pc = funcPC(_ExternalCode) + sys.PCQuantum
  3783  			}
  3784  			stk[0] = pc
  3785  			if mp.preemptoff != "" {
  3786  				stk[1] = funcPC(_GC) + sys.PCQuantum
  3787  			} else {
  3788  				stk[1] = funcPC(_System) + sys.PCQuantum
  3789  			}
  3790  		}
  3791  	}
  3792  
  3793  	if prof.hz != 0 {
  3794  		cpuprof.add(gp, stk[:n])
  3795  	}
  3796  	getg().m.mallocing--
  3797  }
  3798  
  3799  // If the signal handler receives a SIGPROF signal on a non-Go thread,
  3800  // it tries to collect a traceback into sigprofCallers.
  3801  // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
  3802  var sigprofCallers cgoCallers
  3803  var sigprofCallersUse uint32
  3804  
  3805  // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
  3806  // and the signal handler collected a stack trace in sigprofCallers.
  3807  // When this is called, sigprofCallersUse will be non-zero.
  3808  // g is nil, and what we can do is very limited.
  3809  //go:nosplit
  3810  //go:nowritebarrierrec
  3811  func sigprofNonGo() {
  3812  	if prof.hz != 0 {
  3813  		n := 0
  3814  		for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
  3815  			n++
  3816  		}
  3817  		cpuprof.addNonGo(sigprofCallers[:n])
  3818  	}
  3819  
  3820  	atomic.Store(&sigprofCallersUse, 0)
  3821  }
  3822  
  3823  // sigprofNonGoPC is called when a profiling signal arrived on a
  3824  // non-Go thread and we have a single PC value, not a stack trace.
  3825  // g is nil, and what we can do is very limited.
  3826  //go:nosplit
  3827  //go:nowritebarrierrec
  3828  func sigprofNonGoPC(pc uintptr) {
  3829  	if prof.hz != 0 {
  3830  		stk := []uintptr{
  3831  			pc,
  3832  			funcPC(_ExternalCode) + sys.PCQuantum,
  3833  		}
  3834  		cpuprof.addNonGo(stk)
  3835  	}
  3836  }
  3837  
  3838  // Reports whether a function will set the SP
  3839  // to an absolute value. Important that
  3840  // we don't traceback when these are at the bottom
  3841  // of the stack since we can't be sure that we will
  3842  // find the caller.
  3843  //
  3844  // If the function is not on the bottom of the stack
  3845  // we assume that it will have set it up so that traceback will be consistent,
  3846  // either by being a traceback terminating function
  3847  // or putting one on the stack at the right offset.
  3848  func setsSP(pc uintptr) bool {
  3849  	f := findfunc(pc)
  3850  	if !f.valid() {
  3851  		// couldn't find the function for this PC,
  3852  		// so assume the worst and stop traceback
  3853  		return true
  3854  	}
  3855  	switch f.funcID {
  3856  	case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack:
  3857  		return true
  3858  	}
  3859  	return false
  3860  }
  3861  
  3862  // setcpuprofilerate sets the CPU profiling rate to hz times per second.
  3863  // If hz <= 0, setcpuprofilerate turns off CPU profiling.
  3864  func setcpuprofilerate(hz int32) {
  3865  	// Force sane arguments.
  3866  	if hz < 0 {
  3867  		hz = 0
  3868  	}
  3869  
  3870  	// Disable preemption, otherwise we can be rescheduled to another thread
  3871  	// that has profiling enabled.
  3872  	_g_ := getg()
  3873  	_g_.m.locks++
  3874  
  3875  	// Stop profiler on this thread so that it is safe to lock prof.
  3876  	// if a profiling signal came in while we had prof locked,
  3877  	// it would deadlock.
  3878  	setThreadCPUProfiler(0)
  3879  
  3880  	for !atomic.Cas(&prof.signalLock, 0, 1) {
  3881  		osyield()
  3882  	}
  3883  	if prof.hz != hz {
  3884  		setProcessCPUProfiler(hz)
  3885  		prof.hz = hz
  3886  	}
  3887  	atomic.Store(&prof.signalLock, 0)
  3888  
  3889  	lock(&sched.lock)
  3890  	sched.profilehz = hz
  3891  	unlock(&sched.lock)
  3892  
  3893  	if hz != 0 {
  3894  		setThreadCPUProfiler(hz)
  3895  	}
  3896  
  3897  	_g_.m.locks--
  3898  }
  3899  
  3900  // init initializes pp, which may be a freshly allocated p or a
  3901  // previously destroyed p, and transitions it to status _Pgcstop.
  3902  func (pp *p) init(id int32) {
  3903  	pp.id = id
  3904  	pp.status = _Pgcstop
  3905  	pp.sudogcache = pp.sudogbuf[:0]
  3906  	for i := range pp.deferpool {
  3907  		pp.deferpool[i] = pp.deferpoolbuf[i][:0]
  3908  	}
  3909  	pp.wbBuf.reset()
  3910  	if pp.mcache == nil {
  3911  		if id == 0 {
  3912  			if getg().m.mcache == nil {
  3913  				throw("missing mcache?")
  3914  			}
  3915  			pp.mcache = getg().m.mcache // bootstrap
  3916  		} else {
  3917  			pp.mcache = allocmcache()
  3918  		}
  3919  	}
  3920  	if raceenabled && pp.raceprocctx == 0 {
  3921  		if id == 0 {
  3922  			pp.raceprocctx = raceprocctx0
  3923  			raceprocctx0 = 0 // bootstrap
  3924  		} else {
  3925  			pp.raceprocctx = raceproccreate()
  3926  		}
  3927  	}
  3928  }
  3929  
  3930  // destroy releases all of the resources associated with pp and
  3931  // transitions it to status _Pdead.
  3932  //
  3933  // sched.lock must be held and the world must be stopped.
  3934  func (pp *p) destroy() {
  3935  	// Move all runnable goroutines to the global queue
  3936  	for pp.runqhead != pp.runqtail {
  3937  		// Pop from tail of local queue
  3938  		pp.runqtail--
  3939  		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
  3940  		// Push onto head of global queue
  3941  		globrunqputhead(gp)
  3942  	}
  3943  	if pp.runnext != 0 {
  3944  		globrunqputhead(pp.runnext.ptr())
  3945  		pp.runnext = 0
  3946  	}
  3947  	// If there's a background worker, make it runnable and put
  3948  	// it on the global queue so it can clean itself up.
  3949  	if gp := pp.gcBgMarkWorker.ptr(); gp != nil {
  3950  		casgstatus(gp, _Gwaiting, _Grunnable)
  3951  		if trace.enabled {
  3952  			traceGoUnpark(gp, 0)
  3953  		}
  3954  		globrunqput(gp)
  3955  		// This assignment doesn't race because the
  3956  		// world is stopped.
  3957  		pp.gcBgMarkWorker.set(nil)
  3958  	}
  3959  	// Flush p's write barrier buffer.
  3960  	if gcphase != _GCoff {
  3961  		wbBufFlush1(pp)
  3962  		pp.gcw.dispose()
  3963  	}
  3964  	for i := range pp.sudogbuf {
  3965  		pp.sudogbuf[i] = nil
  3966  	}
  3967  	pp.sudogcache = pp.sudogbuf[:0]
  3968  	for i := range pp.deferpool {
  3969  		for j := range pp.deferpoolbuf[i] {
  3970  			pp.deferpoolbuf[i][j] = nil
  3971  		}
  3972  		pp.deferpool[i] = pp.deferpoolbuf[i][:0]
  3973  	}
  3974  	freemcache(pp.mcache)
  3975  	pp.mcache = nil
  3976  	gfpurge(pp)
  3977  	traceProcFree(pp)
  3978  	if raceenabled {
  3979  		raceprocdestroy(pp.raceprocctx)
  3980  		pp.raceprocctx = 0
  3981  	}
  3982  	pp.gcAssistTime = 0
  3983  	pp.status = _Pdead
  3984  }
  3985  
  3986  // Change number of processors. The world is stopped, sched is locked.
  3987  // gcworkbufs are not being modified by either the GC or
  3988  // the write barrier code.
  3989  // Returns list of Ps with local work, they need to be scheduled by the caller.
  3990  func procresize(nprocs int32) *p {
  3991  	old := gomaxprocs
  3992  	if old < 0 || nprocs <= 0 {
  3993  		throw("procresize: invalid arg")
  3994  	}
  3995  	if trace.enabled {
  3996  		traceGomaxprocs(nprocs)
  3997  	}
  3998  
  3999  	// update statistics
  4000  	now := nanotime()
  4001  	if sched.procresizetime != 0 {
  4002  		sched.totaltime += int64(old) * (now - sched.procresizetime)
  4003  	}
  4004  	sched.procresizetime = now
  4005  
  4006  	// Grow allp if necessary.
  4007  	if nprocs > int32(len(allp)) {
  4008  		// Synchronize with retake, which could be running
  4009  		// concurrently since it doesn't run on a P.
  4010  		lock(&allpLock)
  4011  		if nprocs <= int32(cap(allp)) {
  4012  			allp = allp[:nprocs]
  4013  		} else {
  4014  			nallp := make([]*p, nprocs)
  4015  			// Copy everything up to allp's cap so we
  4016  			// never lose old allocated Ps.
  4017  			copy(nallp, allp[:cap(allp)])
  4018  			allp = nallp
  4019  		}
  4020  		unlock(&allpLock)
  4021  	}
  4022  
  4023  	// initialize new P's
  4024  	for i := old; i < nprocs; i++ {
  4025  		pp := allp[i]
  4026  		if pp == nil {
  4027  			pp = new(p)
  4028  		}
  4029  		pp.init(i)
  4030  		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
  4031  	}
  4032  
  4033  	_g_ := getg()
  4034  	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
  4035  		// continue to use the current P
  4036  		_g_.m.p.ptr().status = _Prunning
  4037  		_g_.m.p.ptr().mcache.prepareForSweep()
  4038  	} else {
  4039  		// release the current P and acquire allp[0].
  4040  		//
  4041  		// We must do this before destroying our current P
  4042  		// because p.destroy itself has write barriers, so we
  4043  		// need to do that from a valid P.
  4044  		if _g_.m.p != 0 {
  4045  			if trace.enabled {
  4046  				// Pretend that we were descheduled
  4047  				// and then scheduled again to keep
  4048  				// the trace sane.
  4049  				traceGoSched()
  4050  				traceProcStop(_g_.m.p.ptr())
  4051  			}
  4052  			_g_.m.p.ptr().m = 0
  4053  		}
  4054  		_g_.m.p = 0
  4055  		_g_.m.mcache = nil
  4056  		p := allp[0]
  4057  		p.m = 0
  4058  		p.status = _Pidle
  4059  		acquirep(p)
  4060  		if trace.enabled {
  4061  			traceGoStart()
  4062  		}
  4063  	}
  4064  
  4065  	// release resources from unused P's
  4066  	for i := nprocs; i < old; i++ {
  4067  		p := allp[i]
  4068  		p.destroy()
  4069  		// can't free P itself because it can be referenced by an M in syscall
  4070  	}
  4071  
  4072  	// Trim allp.
  4073  	if int32(len(allp)) != nprocs {
  4074  		lock(&allpLock)
  4075  		allp = allp[:nprocs]
  4076  		unlock(&allpLock)
  4077  	}
  4078  
  4079  	var runnablePs *p
  4080  	for i := nprocs - 1; i >= 0; i-- {
  4081  		p := allp[i]
  4082  		if _g_.m.p.ptr() == p {
  4083  			continue
  4084  		}
  4085  		p.status = _Pidle
  4086  		if runqempty(p) {
  4087  			pidleput(p)
  4088  		} else {
  4089  			p.m.set(mget())
  4090  			p.link.set(runnablePs)
  4091  			runnablePs = p
  4092  		}
  4093  	}
  4094  	stealOrder.reset(uint32(nprocs))
  4095  	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
  4096  	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
  4097  	return runnablePs
  4098  }
  4099  
  4100  // Associate p and the current m.
  4101  //
  4102  // This function is allowed to have write barriers even if the caller
  4103  // isn't because it immediately acquires _p_.
  4104  //
  4105  //go:yeswritebarrierrec
  4106  func acquirep(_p_ *p) {
  4107  	// Do the part that isn't allowed to have write barriers.
  4108  	wirep(_p_)
  4109  
  4110  	// Have p; write barriers now allowed.
  4111  
  4112  	// Perform deferred mcache flush before this P can allocate
  4113  	// from a potentially stale mcache.
  4114  	_p_.mcache.prepareForSweep()
  4115  
  4116  	if trace.enabled {
  4117  		traceProcStart()
  4118  	}
  4119  }
  4120  
  4121  // wirep is the first step of acquirep, which actually associates the
  4122  // current M to _p_. This is broken out so we can disallow write
  4123  // barriers for this part, since we don't yet have a P.
  4124  //
  4125  //go:nowritebarrierrec
  4126  //go:nosplit
  4127  func wirep(_p_ *p) {
  4128  	_g_ := getg()
  4129  
  4130  	if _g_.m.p != 0 || _g_.m.mcache != nil {
  4131  		throw("wirep: already in go")
  4132  	}
  4133  	if _p_.m != 0 || _p_.status != _Pidle {
  4134  		id := int64(0)
  4135  		if _p_.m != 0 {
  4136  			id = _p_.m.ptr().id
  4137  		}
  4138  		print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
  4139  		throw("wirep: invalid p state")
  4140  	}
  4141  	_g_.m.mcache = _p_.mcache
  4142  	_g_.m.p.set(_p_)
  4143  	_p_.m.set(_g_.m)
  4144  	_p_.status = _Prunning
  4145  }
  4146  
  4147  // Disassociate p and the current m.
  4148  func releasep() *p {
  4149  	_g_ := getg()
  4150  
  4151  	if _g_.m.p == 0 || _g_.m.mcache == nil {
  4152  		throw("releasep: invalid arg")
  4153  	}
  4154  	_p_ := _g_.m.p.ptr()
  4155  	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
  4156  		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
  4157  		throw("releasep: invalid p state")
  4158  	}
  4159  	if trace.enabled {
  4160  		traceProcStop(_g_.m.p.ptr())
  4161  	}
  4162  	_g_.m.p = 0
  4163  	_g_.m.mcache = nil
  4164  	_p_.m = 0
  4165  	_p_.status = _Pidle
  4166  	return _p_
  4167  }
  4168  
  4169  func incidlelocked(v int32) {
  4170  	lock(&sched.lock)
  4171  	sched.nmidlelocked += v
  4172  	if v > 0 {
  4173  		checkdead()
  4174  	}
  4175  	unlock(&sched.lock)
  4176  }
  4177  
  4178  // Check for deadlock situation.
  4179  // The check is based on number of running M's, if 0 -> deadlock.
  4180  // sched.lock must be held.
  4181  func checkdead() {
  4182  	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
  4183  	// there are no running goroutines. The calling program is
  4184  	// assumed to be running.
  4185  	if islibrary || isarchive {
  4186  		return
  4187  	}
  4188  
  4189  	// If we are dying because of a signal caught on an already idle thread,
  4190  	// freezetheworld will cause all running threads to block.
  4191  	// And runtime will essentially enter into deadlock state,
  4192  	// except that there is a thread that will call exit soon.
  4193  	if panicking > 0 {
  4194  		return
  4195  	}
  4196  
  4197  	// If we are not running under cgo, but we have an extra M then account
  4198  	// for it. (It is possible to have an extra M on Windows without cgo to
  4199  	// accommodate callbacks created by syscall.NewCallback. See issue #6751
  4200  	// for details.)
  4201  	var run0 int32
  4202  	if !iscgo && cgoHasExtraM {
  4203  		mp := lockextra(true)
  4204  		haveExtraM := extraMCount > 0
  4205  		unlockextra(mp)
  4206  		if haveExtraM {
  4207  			run0 = 1
  4208  		}
  4209  	}
  4210  
  4211  	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
  4212  	if run > run0 {
  4213  		return
  4214  	}
  4215  	if run < 0 {
  4216  		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
  4217  		throw("checkdead: inconsistent counts")
  4218  	}
  4219  
  4220  	grunning := 0
  4221  	lock(&allglock)
  4222  	for i := 0; i < len(allgs); i++ {
  4223  		gp := allgs[i]
  4224  		if isSystemGoroutine(gp, false) {
  4225  			continue
  4226  		}
  4227  		s := readgstatus(gp)
  4228  		switch s &^ _Gscan {
  4229  		case _Gwaiting:
  4230  			grunning++
  4231  		case _Grunnable,
  4232  			_Grunning,
  4233  			_Gsyscall:
  4234  			unlock(&allglock)
  4235  			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
  4236  			throw("checkdead: runnable g")
  4237  		}
  4238  	}
  4239  	unlock(&allglock)
  4240  	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
  4241  		throw("no goroutines (main called runtime.Goexit) - deadlock!")
  4242  	}
  4243  
  4244  	// Maybe jump time forward for playground.
  4245  	gp := timejump()
  4246  	if gp != nil {
  4247  		casgstatus(gp, _Gwaiting, _Grunnable)
  4248  		globrunqput(gp)
  4249  		_p_ := pidleget()
  4250  		if _p_ == nil {
  4251  			throw("checkdead: no p for timer")
  4252  		}
  4253  		mp := mget()
  4254  		if mp == nil {
  4255  			// There should always be a free M since
  4256  			// nothing is running.
  4257  			throw("checkdead: no m for timer")
  4258  		}
  4259  		mp.nextp.set(_p_)
  4260  		notewakeup(&mp.park)
  4261  		return
  4262  	}
  4263  
  4264  	getg().m.throwing = -1 // do not dump full stacks
  4265  	throw("all goroutines are asleep - deadlock!")
  4266  }
  4267  
  4268  // forcegcperiod is the maximum time in nanoseconds between garbage
  4269  // collections. If we go this long without a garbage collection, one
  4270  // is forced to run.
  4271  //
  4272  // This is a variable for testing purposes. It normally doesn't change.
  4273  var forcegcperiod int64 = 2 * 60 * 1e9
  4274  
  4275  // Always runs without a P, so write barriers are not allowed.
  4276  //
  4277  //go:nowritebarrierrec
  4278  func sysmon() {
  4279  	lock(&sched.lock)
  4280  	sched.nmsys++
  4281  	checkdead()
  4282  	unlock(&sched.lock)
  4283  
  4284  	lasttrace := int64(0)
  4285  	idle := 0 // how many cycles in succession we had not wokeup somebody
  4286  	delay := uint32(0)
  4287  	for {
  4288  		if idle == 0 { // start with 20us sleep...
  4289  			delay = 20
  4290  		} else if idle > 50 { // start doubling the sleep after 1ms...
  4291  			delay *= 2
  4292  		}
  4293  		if delay > 10*1000 { // up to 10ms
  4294  			delay = 10 * 1000
  4295  		}
  4296  		usleep(delay)
  4297  		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
  4298  			lock(&sched.lock)
  4299  			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
  4300  				atomic.Store(&sched.sysmonwait, 1)
  4301  				unlock(&sched.lock)
  4302  				// Make wake-up period small enough
  4303  				// for the sampling to be correct.
  4304  				maxsleep := forcegcperiod / 2
  4305  				shouldRelax := true
  4306  				if osRelaxMinNS > 0 {
  4307  					next := timeSleepUntil()
  4308  					now := nanotime()
  4309  					if next-now < osRelaxMinNS {
  4310  						shouldRelax = false
  4311  					}
  4312  				}
  4313  				if shouldRelax {
  4314  					osRelax(true)
  4315  				}
  4316  				notetsleep(&sched.sysmonnote, maxsleep)
  4317  				if shouldRelax {
  4318  					osRelax(false)
  4319  				}
  4320  				lock(&sched.lock)
  4321  				atomic.Store(&sched.sysmonwait, 0)
  4322  				noteclear(&sched.sysmonnote)
  4323  				idle = 0
  4324  				delay = 20
  4325  			}
  4326  			unlock(&sched.lock)
  4327  		}
  4328  		// trigger libc interceptors if needed
  4329  		if *cgo_yield != nil {
  4330  			asmcgocall(*cgo_yield, nil)
  4331  		}
  4332  		// poll network if not polled for more than 10ms
  4333  		lastpoll := int64(atomic.Load64(&sched.lastpoll))
  4334  		now := nanotime()
  4335  		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
  4336  			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
  4337  			list := netpoll(false) // non-blocking - returns list of goroutines
  4338  			if !list.empty() {
  4339  				// Need to decrement number of idle locked M's
  4340  				// (pretending that one more is running) before injectglist.
  4341  				// Otherwise it can lead to the following situation:
  4342  				// injectglist grabs all P's but before it starts M's to run the P's,
  4343  				// another M returns from syscall, finishes running its G,
  4344  				// observes that there is no work to do and no other running M's
  4345  				// and reports deadlock.
  4346  				incidlelocked(-1)
  4347  				injectglist(&list)
  4348  				incidlelocked(1)
  4349  			}
  4350  		}
  4351  		// retake P's blocked in syscalls
  4352  		// and preempt long running G's
  4353  		if retake(now) != 0 {
  4354  			idle = 0
  4355  		} else {
  4356  			idle++
  4357  		}
  4358  		// check if we need to force a GC
  4359  		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
  4360  			lock(&forcegc.lock)
  4361  			forcegc.idle = 0
  4362  			var list gList
  4363  			list.push(forcegc.g)
  4364  			injectglist(&list)
  4365  			unlock(&forcegc.lock)
  4366  		}
  4367  		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
  4368  			lasttrace = now
  4369  			schedtrace(debug.scheddetail > 0)
  4370  		}
  4371  	}
  4372  }
  4373  
  4374  type sysmontick struct {
  4375  	schedtick   uint32
  4376  	schedwhen   int64
  4377  	syscalltick uint32
  4378  	syscallwhen int64
  4379  }
  4380  
  4381  // forcePreemptNS is the time slice given to a G before it is
  4382  // preempted.
  4383  const forcePreemptNS = 10 * 1000 * 1000 // 10ms
  4384  
  4385  func retake(now int64) uint32 {
  4386  	n := 0
  4387  	// Prevent allp slice changes. This lock will be completely
  4388  	// uncontended unless we're already stopping the world.
  4389  	lock(&allpLock)
  4390  	// We can't use a range loop over allp because we may
  4391  	// temporarily drop the allpLock. Hence, we need to re-fetch
  4392  	// allp each time around the loop.
  4393  	for i := 0; i < len(allp); i++ {
  4394  		_p_ := allp[i]
  4395  		if _p_ == nil {
  4396  			// This can happen if procresize has grown
  4397  			// allp but not yet created new Ps.
  4398  			continue
  4399  		}
  4400  		pd := &_p_.sysmontick
  4401  		s := _p_.status
  4402  		sysretake := false
  4403  		if s == _Prunning || s == _Psyscall {
  4404  			// Preempt G if it's running for too long.
  4405  			t := int64(_p_.schedtick)
  4406  			if int64(pd.schedtick) != t {
  4407  				pd.schedtick = uint32(t)
  4408  				pd.schedwhen = now
  4409  			} else if pd.schedwhen+forcePreemptNS <= now {
  4410  				preemptone(_p_)
  4411  				// In case of syscall, preemptone() doesn't
  4412  				// work, because there is no M wired to P.
  4413  				sysretake = true
  4414  			}
  4415  		}
  4416  		if s == _Psyscall {
  4417  			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
  4418  			t := int64(_p_.syscalltick)
  4419  			if !sysretake && int64(pd.syscalltick) != t {
  4420  				pd.syscalltick = uint32(t)
  4421  				pd.syscallwhen = now
  4422  				continue
  4423  			}
  4424  			// On the one hand we don't want to retake Ps if there is no other work to do,
  4425  			// but on the other hand we want to retake them eventually
  4426  			// because they can prevent the sysmon thread from deep sleep.
  4427  			if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
  4428  				continue
  4429  			}
  4430  			// Drop allpLock so we can take sched.lock.
  4431  			unlock(&allpLock)
  4432  			// Need to decrement number of idle locked M's
  4433  			// (pretending that one more is running) before the CAS.
  4434  			// Otherwise the M from which we retake can exit the syscall,
  4435  			// increment nmidle and report deadlock.
  4436  			incidlelocked(-1)
  4437  			if atomic.Cas(&_p_.status, s, _Pidle) {
  4438  				if trace.enabled {
  4439  					traceGoSysBlock(_p_)
  4440  					traceProcStop(_p_)
  4441  				}
  4442  				n++
  4443  				_p_.syscalltick++
  4444  				handoffp(_p_)
  4445  			}
  4446  			incidlelocked(1)
  4447  			lock(&allpLock)
  4448  		}
  4449  	}
  4450  	unlock(&allpLock)
  4451  	return uint32(n)
  4452  }
  4453  
  4454  // Tell all goroutines that they have been preempted and they should stop.
  4455  // This function is purely best-effort. It can fail to inform a goroutine if a
  4456  // processor just started running it.
  4457  // No locks need to be held.
  4458  // Returns true if preemption request was issued to at least one goroutine.
  4459  func preemptall() bool {
  4460  	res := false
  4461  	for _, _p_ := range allp {
  4462  		if _p_.status != _Prunning {
  4463  			continue
  4464  		}
  4465  		if preemptone(_p_) {
  4466  			res = true
  4467  		}
  4468  	}
  4469  	return res
  4470  }
  4471  
  4472  // Tell the goroutine running on processor P to stop.
  4473  // This function is purely best-effort. It can incorrectly fail to inform the
  4474  // goroutine. It can send inform the wrong goroutine. Even if it informs the
  4475  // correct goroutine, that goroutine might ignore the request if it is
  4476  // simultaneously executing newstack.
  4477  // No lock needs to be held.
  4478  // Returns true if preemption request was issued.
  4479  // The actual preemption will happen at some point in the future
  4480  // and will be indicated by the gp->status no longer being
  4481  // Grunning
  4482  func preemptone(_p_ *p) bool {
  4483  	mp := _p_.m.ptr()
  4484  	if mp == nil || mp == getg().m {
  4485  		return false
  4486  	}
  4487  	gp := mp.curg
  4488  	if gp == nil || gp == mp.g0 {
  4489  		return false
  4490  	}
  4491  
  4492  	gp.preempt = true
  4493  
  4494  	// Every call in a go routine checks for stack overflow by
  4495  	// comparing the current stack pointer to gp->stackguard0.
  4496  	// Setting gp->stackguard0 to StackPreempt folds
  4497  	// preemption into the normal stack overflow check.
  4498  	gp.stackguard0 = stackPreempt
  4499  	return true
  4500  }
  4501  
  4502  var starttime int64
  4503  
  4504  func schedtrace(detailed bool) {
  4505  	now := nanotime()
  4506  	if starttime == 0 {
  4507  		starttime = now
  4508  	}
  4509  
  4510  	lock(&sched.lock)
  4511  	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
  4512  	if detailed {
  4513  		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
  4514  	}
  4515  	// We must be careful while reading data from P's, M's and G's.
  4516  	// Even if we hold schedlock, most data can be changed concurrently.
  4517  	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
  4518  	for i, _p_ := range allp {
  4519  		mp := _p_.m.ptr()
  4520  		h := atomic.Load(&_p_.runqhead)
  4521  		t := atomic.Load(&_p_.runqtail)
  4522  		if detailed {
  4523  			id := int64(-1)
  4524  			if mp != nil {
  4525  				id = mp.id
  4526  			}
  4527  			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, "\n")
  4528  		} else {
  4529  			// In non-detailed mode format lengths of per-P run queues as:
  4530  			// [len1 len2 len3 len4]
  4531  			print(" ")
  4532  			if i == 0 {
  4533  				print("[")
  4534  			}
  4535  			print(t - h)
  4536  			if i == len(allp)-1 {
  4537  				print("]\n")
  4538  			}
  4539  		}
  4540  	}
  4541  
  4542  	if !detailed {
  4543  		unlock(&sched.lock)
  4544  		return
  4545  	}
  4546  
  4547  	for mp := allm; mp != nil; mp = mp.alllink {
  4548  		_p_ := mp.p.ptr()
  4549  		gp := mp.curg
  4550  		lockedg := mp.lockedg.ptr()
  4551  		id1 := int32(-1)
  4552  		if _p_ != nil {
  4553  			id1 = _p_.id
  4554  		}
  4555  		id2 := int64(-1)
  4556  		if gp != nil {
  4557  			id2 = gp.goid
  4558  		}
  4559  		id3 := int64(-1)
  4560  		if lockedg != nil {
  4561  			id3 = lockedg.goid
  4562  		}
  4563  		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
  4564  	}
  4565  
  4566  	lock(&allglock)
  4567  	for gi := 0; gi < len(allgs); gi++ {
  4568  		gp := allgs[gi]
  4569  		mp := gp.m
  4570  		lockedm := gp.lockedm.ptr()
  4571  		id1 := int64(-1)
  4572  		if mp != nil {
  4573  			id1 = mp.id
  4574  		}
  4575  		id2 := int64(-1)
  4576  		if lockedm != nil {
  4577  			id2 = lockedm.id
  4578  		}
  4579  		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
  4580  	}
  4581  	unlock(&allglock)
  4582  	unlock(&sched.lock)
  4583  }
  4584  
  4585  // schedEnableUser enables or disables the scheduling of user
  4586  // goroutines.
  4587  //
  4588  // This does not stop already running user goroutines, so the caller
  4589  // should first stop the world when disabling user goroutines.
  4590  func schedEnableUser(enable bool) {
  4591  	lock(&sched.lock)
  4592  	if sched.disable.user == !enable {
  4593  		unlock(&sched.lock)
  4594  		return
  4595  	}
  4596  	sched.disable.user = !enable
  4597  	if enable {
  4598  		n := sched.disable.n
  4599  		sched.disable.n = 0
  4600  		globrunqputbatch(&sched.disable.runnable, n)
  4601  		unlock(&sched.lock)
  4602  		for ; n != 0 && sched.npidle != 0; n-- {
  4603  			startm(nil, false)
  4604  		}
  4605  	} else {
  4606  		unlock(&sched.lock)
  4607  	}
  4608  }
  4609  
  4610  // schedEnabled reports whether gp should be scheduled. It returns
  4611  // false is scheduling of gp is disabled.
  4612  func schedEnabled(gp *g) bool {
  4613  	if sched.disable.user {
  4614  		return isSystemGoroutine(gp, true)
  4615  	}
  4616  	return true
  4617  }
  4618  
  4619  // Put mp on midle list.
  4620  // Sched must be locked.
  4621  // May run during STW, so write barriers are not allowed.
  4622  //go:nowritebarrierrec
  4623  func mput(mp *m) {
  4624  	mp.schedlink = sched.midle
  4625  	sched.midle.set(mp)
  4626  	sched.nmidle++
  4627  	checkdead()
  4628  }
  4629  
  4630  // Try to get an m from midle list.
  4631  // Sched must be locked.
  4632  // May run during STW, so write barriers are not allowed.
  4633  //go:nowritebarrierrec
  4634  func mget() *m {
  4635  	mp := sched.midle.ptr()
  4636  	if mp != nil {
  4637  		sched.midle = mp.schedlink
  4638  		sched.nmidle--
  4639  	}
  4640  	return mp
  4641  }
  4642  
  4643  // Put gp on the global runnable queue.
  4644  // Sched must be locked.
  4645  // May run during STW, so write barriers are not allowed.
  4646  //go:nowritebarrierrec
  4647  func globrunqput(gp *g) {
  4648  	sched.runq.pushBack(gp)
  4649  	sched.runqsize++
  4650  }
  4651  
  4652  // Put gp at the head of the global runnable queue.
  4653  // Sched must be locked.
  4654  // May run during STW, so write barriers are not allowed.
  4655  //go:nowritebarrierrec
  4656  func globrunqputhead(gp *g) {
  4657  	sched.runq.push(gp)
  4658  	sched.runqsize++
  4659  }
  4660  
  4661  // Put a batch of runnable goroutines on the global runnable queue.
  4662  // This clears *batch.
  4663  // Sched must be locked.
  4664  func globrunqputbatch(batch *gQueue, n int32) {
  4665  	sched.runq.pushBackAll(*batch)
  4666  	sched.runqsize += n
  4667  	*batch = gQueue{}
  4668  }
  4669  
  4670  // Try get a batch of G's from the global runnable queue.
  4671  // Sched must be locked.
  4672  func globrunqget(_p_ *p, max int32) *g {
  4673  	if sched.runqsize == 0 {
  4674  		return nil
  4675  	}
  4676  
  4677  	n := sched.runqsize/gomaxprocs + 1
  4678  	if n > sched.runqsize {
  4679  		n = sched.runqsize
  4680  	}
  4681  	if max > 0 && n > max {
  4682  		n = max
  4683  	}
  4684  	if n > int32(len(_p_.runq))/2 {
  4685  		n = int32(len(_p_.runq)) / 2
  4686  	}
  4687  
  4688  	sched.runqsize -= n
  4689  
  4690  	gp := sched.runq.pop()
  4691  	n--
  4692  	for ; n > 0; n-- {
  4693  		gp1 := sched.runq.pop()
  4694  		runqput(_p_, gp1, false)
  4695  	}
  4696  	return gp
  4697  }
  4698  
  4699  // Put p to on _Pidle list.
  4700  // Sched must be locked.
  4701  // May run during STW, so write barriers are not allowed.
  4702  //go:nowritebarrierrec
  4703  func pidleput(_p_ *p) {
  4704  	if !runqempty(_p_) {
  4705  		throw("pidleput: P has non-empty run queue")
  4706  	}
  4707  	_p_.link = sched.pidle
  4708  	sched.pidle.set(_p_)
  4709  	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
  4710  }
  4711  
  4712  // Try get a p from _Pidle list.
  4713  // Sched must be locked.
  4714  // May run during STW, so write barriers are not allowed.
  4715  //go:nowritebarrierrec
  4716  func pidleget() *p {
  4717  	_p_ := sched.pidle.ptr()
  4718  	if _p_ != nil {
  4719  		sched.pidle = _p_.link
  4720  		atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
  4721  	}
  4722  	return _p_
  4723  }
  4724  
  4725  // runqempty reports whether _p_ has no Gs on its local run queue.
  4726  // It never returns true spuriously.
  4727  func runqempty(_p_ *p) bool {
  4728  	// Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
  4729  	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
  4730  	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
  4731  	// does not mean the queue is empty.
  4732  	for {
  4733  		head := atomic.Load(&_p_.runqhead)
  4734  		tail := atomic.Load(&_p_.runqtail)
  4735  		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
  4736  		if tail == atomic.Load(&_p_.runqtail) {
  4737  			return head == tail && runnext == 0
  4738  		}
  4739  	}
  4740  }
  4741  
  4742  // To shake out latent assumptions about scheduling order,
  4743  // we introduce some randomness into scheduling decisions
  4744  // when running with the race detector.
  4745  // The need for this was made obvious by changing the
  4746  // (deterministic) scheduling order in Go 1.5 and breaking
  4747  // many poorly-written tests.
  4748  // With the randomness here, as long as the tests pass
  4749  // consistently with -race, they shouldn't have latent scheduling
  4750  // assumptions.
  4751  const randomizeScheduler = raceenabled
  4752  
  4753  // runqput tries to put g on the local runnable queue.
  4754  // If next is false, runqput adds g to the tail of the runnable queue.
  4755  // If next is true, runqput puts g in the _p_.runnext slot.
  4756  // If the run queue is full, runnext puts g on the global queue.
  4757  // Executed only by the owner P.
  4758  func runqput(_p_ *p, gp *g, next bool) {
  4759  	if randomizeScheduler && next && fastrand()%2 == 0 {
  4760  		next = false
  4761  	}
  4762  
  4763  	if next {
  4764  	retryNext:
  4765  		oldnext := _p_.runnext
  4766  		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
  4767  			goto retryNext
  4768  		}
  4769  		if oldnext == 0 {
  4770  			return
  4771  		}
  4772  		// Kick the old runnext out to the regular run queue.
  4773  		gp = oldnext.ptr()
  4774  	}
  4775  
  4776  retry:
  4777  	h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
  4778  	t := _p_.runqtail
  4779  	if t-h < uint32(len(_p_.runq)) {
  4780  		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
  4781  		atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
  4782  		return
  4783  	}
  4784  	if runqputslow(_p_, gp, h, t) {
  4785  		return
  4786  	}
  4787  	// the queue is not full, now the put above must succeed
  4788  	goto retry
  4789  }
  4790  
  4791  // Put g and a batch of work from local runnable queue on global queue.
  4792  // Executed only by the owner P.
  4793  func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
  4794  	var batch [len(_p_.runq)/2 + 1]*g
  4795  
  4796  	// First, grab a batch from local queue.
  4797  	n := t - h
  4798  	n = n / 2
  4799  	if n != uint32(len(_p_.runq)/2) {
  4800  		throw("runqputslow: queue is not full")
  4801  	}
  4802  	for i := uint32(0); i < n; i++ {
  4803  		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
  4804  	}
  4805  	if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  4806  		return false
  4807  	}
  4808  	batch[n] = gp
  4809  
  4810  	if randomizeScheduler {
  4811  		for i := uint32(1); i <= n; i++ {
  4812  			j := fastrandn(i + 1)
  4813  			batch[i], batch[j] = batch[j], batch[i]
  4814  		}
  4815  	}
  4816  
  4817  	// Link the goroutines.
  4818  	for i := uint32(0); i < n; i++ {
  4819  		batch[i].schedlink.set(batch[i+1])
  4820  	}
  4821  	var q gQueue
  4822  	q.head.set(batch[0])
  4823  	q.tail.set(batch[n])
  4824  
  4825  	// Now put the batch on global queue.
  4826  	lock(&sched.lock)
  4827  	globrunqputbatch(&q, int32(n+1))
  4828  	unlock(&sched.lock)
  4829  	return true
  4830  }
  4831  
  4832  // Get g from local runnable queue.
  4833  // If inheritTime is true, gp should inherit the remaining time in the
  4834  // current time slice. Otherwise, it should start a new time slice.
  4835  // Executed only by the owner P.
  4836  func runqget(_p_ *p) (gp *g, inheritTime bool) {
  4837  	// If there's a runnext, it's the next G to run.
  4838  	for {
  4839  		next := _p_.runnext
  4840  		if next == 0 {
  4841  			break
  4842  		}
  4843  		if _p_.runnext.cas(next, 0) {
  4844  			return next.ptr(), true
  4845  		}
  4846  	}
  4847  
  4848  	for {
  4849  		h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
  4850  		t := _p_.runqtail
  4851  		if t == h {
  4852  			return nil, false
  4853  		}
  4854  		gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
  4855  		if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
  4856  			return gp, false
  4857  		}
  4858  	}
  4859  }
  4860  
  4861  // Grabs a batch of goroutines from _p_'s runnable queue into batch.
  4862  // Batch is a ring buffer starting at batchHead.
  4863  // Returns number of grabbed goroutines.
  4864  // Can be executed by any P.
  4865  func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
  4866  	for {
  4867  		h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
  4868  		t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
  4869  		n := t - h
  4870  		n = n - n/2
  4871  		if n == 0 {
  4872  			if stealRunNextG {
  4873  				// Try to steal from _p_.runnext.
  4874  				if next := _p_.runnext; next != 0 {
  4875  					if _p_.status == _Prunning {
  4876  						// Sleep to ensure that _p_ isn't about to run the g
  4877  						// we are about to steal.
  4878  						// The important use case here is when the g running
  4879  						// on _p_ ready()s another g and then almost
  4880  						// immediately blocks. Instead of stealing runnext
  4881  						// in this window, back off to give _p_ a chance to
  4882  						// schedule runnext. This will avoid thrashing gs
  4883  						// between different Ps.
  4884  						// A sync chan send/recv takes ~50ns as of time of
  4885  						// writing, so 3us gives ~50x overshoot.
  4886  						if GOOS != "windows" {
  4887  							usleep(3)
  4888  						} else {
  4889  							// On windows system timer granularity is
  4890  							// 1-15ms, which is way too much for this
  4891  							// optimization. So just yield.
  4892  							osyield()
  4893  						}
  4894  					}
  4895  					if !_p_.runnext.cas(next, 0) {
  4896  						continue
  4897  					}
  4898  					batch[batchHead%uint32(len(batch))] = next
  4899  					return 1
  4900  				}
  4901  			}
  4902  			return 0
  4903  		}
  4904  		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
  4905  			continue
  4906  		}
  4907  		for i := uint32(0); i < n; i++ {
  4908  			g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
  4909  			batch[(batchHead+i)%uint32(len(batch))] = g
  4910  		}
  4911  		if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  4912  			return n
  4913  		}
  4914  	}
  4915  }
  4916  
  4917  // Steal half of elements from local runnable queue of p2
  4918  // and put onto local runnable queue of p.
  4919  // Returns one of the stolen elements (or nil if failed).
  4920  func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
  4921  	t := _p_.runqtail
  4922  	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
  4923  	if n == 0 {
  4924  		return nil
  4925  	}
  4926  	n--
  4927  	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
  4928  	if n == 0 {
  4929  		return gp
  4930  	}
  4931  	h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
  4932  	if t-h+n >= uint32(len(_p_.runq)) {
  4933  		throw("runqsteal: runq overflow")
  4934  	}
  4935  	atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
  4936  	return gp
  4937  }
  4938  
  4939  // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only
  4940  // be on one gQueue or gList at a time.
  4941  type gQueue struct {
  4942  	head guintptr
  4943  	tail guintptr
  4944  }
  4945  
  4946  // empty reports whether q is empty.
  4947  func (q *gQueue) empty() bool {
  4948  	return q.head == 0
  4949  }
  4950  
  4951  // push adds gp to the head of q.
  4952  func (q *gQueue) push(gp *g) {
  4953  	gp.schedlink = q.head
  4954  	q.head.set(gp)
  4955  	if q.tail == 0 {
  4956  		q.tail.set(gp)
  4957  	}
  4958  }
  4959  
  4960  // pushBack adds gp to the tail of q.
  4961  func (q *gQueue) pushBack(gp *g) {
  4962  	gp.schedlink = 0
  4963  	if q.tail != 0 {
  4964  		q.tail.ptr().schedlink.set(gp)
  4965  	} else {
  4966  		q.head.set(gp)
  4967  	}
  4968  	q.tail.set(gp)
  4969  }
  4970  
  4971  // pushBackAll adds all Gs in l2 to the tail of q. After this q2 must
  4972  // not be used.
  4973  func (q *gQueue) pushBackAll(q2 gQueue) {
  4974  	if q2.tail == 0 {
  4975  		return
  4976  	}
  4977  	q2.tail.ptr().schedlink = 0
  4978  	if q.tail != 0 {
  4979  		q.tail.ptr().schedlink = q2.head
  4980  	} else {
  4981  		q.head = q2.head
  4982  	}
  4983  	q.tail = q2.tail
  4984  }
  4985  
  4986  // pop removes and returns the head of queue q. It returns nil if
  4987  // q is empty.
  4988  func (q *gQueue) pop() *g {
  4989  	gp := q.head.ptr()
  4990  	if gp != nil {
  4991  		q.head = gp.schedlink
  4992  		if q.head == 0 {
  4993  			q.tail = 0
  4994  		}
  4995  	}
  4996  	return gp
  4997  }
  4998  
  4999  // popList takes all Gs in q and returns them as a gList.
  5000  func (q *gQueue) popList() gList {
  5001  	stack := gList{q.head}
  5002  	*q = gQueue{}
  5003  	return stack
  5004  }
  5005  
  5006  // A gList is a list of Gs linked through g.schedlink. A G can only be
  5007  // on one gQueue or gList at a time.
  5008  type gList struct {
  5009  	head guintptr
  5010  }
  5011  
  5012  // empty reports whether l is empty.
  5013  func (l *gList) empty() bool {
  5014  	return l.head == 0
  5015  }
  5016  
  5017  // push adds gp to the head of l.
  5018  func (l *gList) push(gp *g) {
  5019  	gp.schedlink = l.head
  5020  	l.head.set(gp)
  5021  }
  5022  
  5023  // pushAll prepends all Gs in q to l.
  5024  func (l *gList) pushAll(q gQueue) {
  5025  	if !q.empty() {
  5026  		q.tail.ptr().schedlink = l.head
  5027  		l.head = q.head
  5028  	}
  5029  }
  5030  
  5031  // pop removes and returns the head of l. If l is empty, it returns nil.
  5032  func (l *gList) pop() *g {
  5033  	gp := l.head.ptr()
  5034  	if gp != nil {
  5035  		l.head = gp.schedlink
  5036  	}
  5037  	return gp
  5038  }
  5039  
  5040  //go:linkname setMaxThreads runtime/debug.setMaxThreads
  5041  func setMaxThreads(in int) (out int) {
  5042  	lock(&sched.lock)
  5043  	out = int(sched.maxmcount)
  5044  	if in > 0x7fffffff { // MaxInt32
  5045  		sched.maxmcount = 0x7fffffff
  5046  	} else {
  5047  		sched.maxmcount = int32(in)
  5048  	}
  5049  	checkmcount()
  5050  	unlock(&sched.lock)
  5051  	return
  5052  }
  5053  
  5054  func haveexperiment(name string) bool {
  5055  	if name == "framepointer" {
  5056  		return framepointer_enabled // set by linker
  5057  	}
  5058  	x := sys.Goexperiment
  5059  	for x != "" {
  5060  		xname := ""
  5061  		i := index(x, ",")
  5062  		if i < 0 {
  5063  			xname, x = x, ""
  5064  		} else {
  5065  			xname, x = x[:i], x[i+1:]
  5066  		}
  5067  		if xname == name {
  5068  			return true
  5069  		}
  5070  		if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
  5071  			return false
  5072  		}
  5073  	}
  5074  	return false
  5075  }
  5076  
  5077  //go:nosplit
  5078  func procPin() int {
  5079  	_g_ := getg()
  5080  	mp := _g_.m
  5081  
  5082  	mp.locks++
  5083  	return int(mp.p.ptr().id)
  5084  }
  5085  
  5086  //go:nosplit
  5087  func procUnpin() {
  5088  	_g_ := getg()
  5089  	_g_.m.locks--
  5090  }
  5091  
  5092  //go:linkname sync_runtime_procPin sync.runtime_procPin
  5093  //go:nosplit
  5094  func sync_runtime_procPin() int {
  5095  	return procPin()
  5096  }
  5097  
  5098  //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
  5099  //go:nosplit
  5100  func sync_runtime_procUnpin() {
  5101  	procUnpin()
  5102  }
  5103  
  5104  //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
  5105  //go:nosplit
  5106  func sync_atomic_runtime_procPin() int {
  5107  	return procPin()
  5108  }
  5109  
  5110  //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
  5111  //go:nosplit
  5112  func sync_atomic_runtime_procUnpin() {
  5113  	procUnpin()
  5114  }
  5115  
  5116  // Active spinning for sync.Mutex.
  5117  //go:linkname sync_runtime_canSpin sync.runtime_canSpin
  5118  //go:nosplit
  5119  func sync_runtime_canSpin(i int) bool {
  5120  	// sync.Mutex is cooperative, so we are conservative with spinning.
  5121  	// Spin only few times and only if running on a multicore machine and
  5122  	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
  5123  	// As opposed to runtime mutex we don't do passive spinning here,
  5124  	// because there can be work on global runq or on other Ps.
  5125  	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
  5126  		return false
  5127  	}
  5128  	if p := getg().m.p.ptr(); !runqempty(p) {
  5129  		return false
  5130  	}
  5131  	return true
  5132  }
  5133  
  5134  //go:linkname sync_runtime_doSpin sync.runtime_doSpin
  5135  //go:nosplit
  5136  func sync_runtime_doSpin() {
  5137  	procyield(active_spin_cnt)
  5138  }
  5139  
  5140  var stealOrder randomOrder
  5141  
  5142  // randomOrder/randomEnum are helper types for randomized work stealing.
  5143  // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
  5144  // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
  5145  // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration.
  5146  type randomOrder struct {
  5147  	count    uint32
  5148  	coprimes []uint32
  5149  }
  5150  
  5151  type randomEnum struct {
  5152  	i     uint32
  5153  	count uint32
  5154  	pos   uint32
  5155  	inc   uint32
  5156  }
  5157  
  5158  func (ord *randomOrder) reset(count uint32) {
  5159  	ord.count = count
  5160  	ord.coprimes = ord.coprimes[:0]
  5161  	for i := uint32(1); i <= count; i++ {
  5162  		if gcd(i, count) == 1 {
  5163  			ord.coprimes = append(ord.coprimes, i)
  5164  		}
  5165  	}
  5166  }
  5167  
  5168  func (ord *randomOrder) start(i uint32) randomEnum {
  5169  	return randomEnum{
  5170  		count: ord.count,
  5171  		pos:   i % ord.count,
  5172  		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
  5173  	}
  5174  }
  5175  
  5176  func (enum *randomEnum) done() bool {
  5177  	return enum.i == enum.count
  5178  }
  5179  
  5180  func (enum *randomEnum) next() {
  5181  	enum.i++
  5182  	enum.pos = (enum.pos + enum.inc) % enum.count
  5183  }
  5184  
  5185  func (enum *randomEnum) position() uint32 {
  5186  	return enum.pos
  5187  }
  5188  
  5189  func gcd(a, b uint32) uint32 {
  5190  	for b != 0 {
  5191  		a, b = b, a%b
  5192  	}
  5193  	return a
  5194  }
  5195  
  5196  // An initTask represents the set of initializations that need to be done for a package.
  5197  type initTask struct {
  5198  	// TODO: pack the first 3 fields more tightly?
  5199  	state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
  5200  	ndeps uintptr
  5201  	nfns  uintptr
  5202  	// followed by ndeps instances of an *initTask, one per package depended on
  5203  	// followed by nfns pcs, one per init function to run
  5204  }
  5205  
  5206  func doInit(t *initTask) {
  5207  	switch t.state {
  5208  	case 2: // fully initialized
  5209  		return
  5210  	case 1: // initialization in progress
  5211  		throw("recursive call during initialization - linker skew")
  5212  	default: // not initialized yet
  5213  		t.state = 1 // initialization in progress
  5214  		for i := uintptr(0); i < t.ndeps; i++ {
  5215  			p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
  5216  			t2 := *(**initTask)(p)
  5217  			doInit(t2)
  5218  		}
  5219  		for i := uintptr(0); i < t.nfns; i++ {
  5220  			p := add(unsafe.Pointer(t), (3+t.ndeps+i)*sys.PtrSize)
  5221  			f := *(*func())(unsafe.Pointer(&p))
  5222  			f()
  5223  		}
  5224  		t.state = 2 // initialization done
  5225  	}
  5226  }
  5227  

View as plain text