Source file src/runtime/mgcmark.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: marking and scanning
     6  
     7  package runtime
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"runtime/internal/sys"
    12  	"unsafe"
    13  )
    14  
    15  const (
    16  	fixedRootFinalizers = iota
    17  	fixedRootFreeGStacks
    18  	fixedRootCount
    19  
    20  	// rootBlockBytes is the number of bytes to scan per data or
    21  	// BSS root.
    22  	rootBlockBytes = 256 << 10
    23  
    24  	// rootBlockSpans is the number of spans to scan per span
    25  	// root.
    26  	rootBlockSpans = 8 * 1024 // 64MB worth of spans
    27  
    28  	// maxObletBytes is the maximum bytes of an object to scan at
    29  	// once. Larger objects will be split up into "oblets" of at
    30  	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
    31  	// scan preemption at ~100 µs.
    32  	//
    33  	// This must be > _MaxSmallSize so that the object base is the
    34  	// span base.
    35  	maxObletBytes = 128 << 10
    36  
    37  	// drainCheckThreshold specifies how many units of work to do
    38  	// between self-preemption checks in gcDrain. Assuming a scan
    39  	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
    40  	// overhead in the scan loop (the scheduler check may perform
    41  	// a syscall, so its overhead is nontrivial). Higher values
    42  	// make the system less responsive to incoming work.
    43  	drainCheckThreshold = 100000
    44  )
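
        // For scale (illustrative arithmetic): with maxObletBytes =
        // 128 KiB, a hypothetical 1 MiB object splits into 1<<20 /
        // 128<<10 = 8 oblets, and at the 1–2 MB/ms scan rate assumed
        // above each oblet costs roughly 64–128 µs, which is the
        // preemption bound these constants aim for.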
    45  
    46  // gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
    47  // some miscellany) and initializes scanning-related state.
    48  //
    49  // The caller must have called gcCopySpans().
    50  //
    51  // The world must be stopped.
    52  //
    53  //go:nowritebarrier
    54  func gcMarkRootPrepare() {
    55  	work.nFlushCacheRoots = 0
    56  
    57  	// Compute how many data and BSS root blocks there are.
    58  	nBlocks := func(bytes uintptr) int {
    59  		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
    60  	}
    61  
    62  	work.nDataRoots = 0
    63  	work.nBSSRoots = 0
    64  
    65  	// Scan globals.
    66  	for _, datap := range activeModules() {
    67  		nDataRoots := nBlocks(datap.edata - datap.data)
    68  		if nDataRoots > work.nDataRoots {
    69  			work.nDataRoots = nDataRoots
    70  		}
    71  	}
    72  
    73  	for _, datap := range activeModules() {
    74  		nBSSRoots := nBlocks(datap.ebss - datap.bss)
    75  		if nBSSRoots > work.nBSSRoots {
    76  			work.nBSSRoots = nBSSRoots
    77  		}
    78  	}
    79  
    80  	// Scan span roots for finalizer specials.
    81  	//
    82  	// We depend on addfinalizer to mark objects that get
    83  	// finalizers after root marking.
    84  	//
    85  	// We're only interested in scanning the in-use spans,
    86  	// which will all be swept at this point. More spans
    87  	// may be added to this list during concurrent GC, but
    88  	// we only care about spans that were allocated before
    89  	// this mark phase.
    90  	work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
    91  
    92  	// Scan stacks.
    93  	//
    94  	// Gs may be created after this point, but it's okay that we
    95  	// ignore them because they begin life without any roots, so
    96  	// there's nothing to scan, and any roots they create during
    97  	// the concurrent phase will be scanned during mark
    98  	// termination.
    99  	work.nStackRoots = int(atomic.Loaduintptr(&allglen))
   100  
   101  	work.markrootNext = 0
   102  	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
   103  }
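
        // Illustrative job layout (hypothetical counts): markroot
        // numbers jobs flatly in the order fixed roots, flush caches,
        // data, BSS, spans, stacks. With nFlushCacheRoots = 0,
        // nDataRoots = 4, nBSSRoots = 2, nSpanRoots = 10, and
        // nStackRoots = 8, markrootJobs = 2+0+4+2+10+8 = 26: jobs 0-1
        // are the fixed roots, 2-5 data blocks, 6-7 BSS blocks, 8-17
        // span shards, and 18-25 stacks, matching the bases computed
        // in markroot below.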
   104  
   105  // gcMarkRootCheck checks that all roots have been scanned. It is
   106  // purely for debugging.
   107  func gcMarkRootCheck() {
   108  	if work.markrootNext < work.markrootJobs {
   109  		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
   110  		throw("left over markroot jobs")
   111  	}
   112  
   113  	lock(&allglock)
   114  	// Check that stacks have been scanned.
   115  	var gp *g
   116  	for i := 0; i < work.nStackRoots; i++ {
   117  		gp = allgs[i]
   118  		if !gp.gcscandone {
   119  			goto fail
   120  		}
   121  	}
   122  	unlock(&allglock)
   123  	return
   124  
   125  fail:
   126  	println("gp", gp, "goid", gp.goid,
   127  		"status", readgstatus(gp),
   128  		"gcscandone", gp.gcscandone,
   129  		"gcscanvalid", gp.gcscanvalid)
   130  	unlock(&allglock) // Avoid self-deadlock with traceback.
   131  	throw("scan missed a g")
   132  }
   133  
   134  // ptrmask for an allocation containing a single pointer.
   135  var oneptrmask = [...]uint8{1}
   136  
   137  // markroot scans the i'th root.
   138  //
   139  // Preemption must be disabled (because this uses a gcWork).
   140  //
   141  // nowritebarrier is only advisory here.
   142  //
   143  //go:nowritebarrier
   144  func markroot(gcw *gcWork, i uint32) {
   145  	// TODO(austin): This is a bit ridiculous. Compute and store
   146  	// the bases in gcMarkRootPrepare instead of the counts.
   147  	baseFlushCache := uint32(fixedRootCount)
   148  	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
   149  	baseBSS := baseData + uint32(work.nDataRoots)
   150  	baseSpans := baseBSS + uint32(work.nBSSRoots)
   151  	baseStacks := baseSpans + uint32(work.nSpanRoots)
   152  	end := baseStacks + uint32(work.nStackRoots)
   153  
   154  	// Note: if you add a case here, please also update heapdump.go:dumproots.
   155  	switch {
   156  	case baseFlushCache <= i && i < baseData:
   157  		flushmcache(int(i - baseFlushCache))
   158  
   159  	case baseData <= i && i < baseBSS:
   160  		for _, datap := range activeModules() {
   161  			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
   162  		}
   163  
   164  	case baseBSS <= i && i < baseSpans:
   165  		for _, datap := range activeModules() {
   166  			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
   167  		}
   168  
   169  	case i == fixedRootFinalizers:
   170  		for fb := allfin; fb != nil; fb = fb.alllink {
   171  			cnt := uintptr(atomic.Load(&fb.cnt))
   172  			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
   173  		}
   174  
   175  	case i == fixedRootFreeGStacks:
   176  		// Switch to the system stack so we can call
   177  		// stackfree.
   178  		systemstack(markrootFreeGStacks)
   179  
   180  	case baseSpans <= i && i < baseStacks:
   181  		// mark mspan.specials
   182  		markrootSpans(gcw, int(i-baseSpans))
   183  
   184  	default:
   185  		// the rest is scanning goroutine stacks
   186  		var gp *g
   187  		if baseStacks <= i && i < end {
   188  			gp = allgs[i-baseStacks]
   189  		} else {
   190  			throw("markroot: bad index")
   191  		}
   192  
   193  		// Remember when we first observed the G blocked;
   194  		// this is needed only for traceback output.
   195  		status := readgstatus(gp) // We are not in a scan state
   196  		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
   197  			gp.waitsince = work.tstart
   198  		}
   199  
   200  		// scang must be done on the system stack in case
   201  		// we're trying to scan our own stack.
   202  		systemstack(func() {
   203  			// If this is a self-scan, put the user G in
   204  			// _Gwaiting to prevent self-deadlock. It may
   205  			// already be in _Gwaiting if this is a mark
   206  			// worker or we're in mark termination.
   207  			userG := getg().m.curg
   208  			selfScan := gp == userG && readgstatus(userG) == _Grunning
   209  			if selfScan {
   210  				casgstatus(userG, _Grunning, _Gwaiting)
   211  				userG.waitreason = waitReasonGarbageCollectionScan
   212  			}
   213  
   214  			// TODO: scang blocks until gp's stack has
   215  			// been scanned, which may take a while for
   216  			// running goroutines. Consider doing this in
   217  			// two phases where the first is non-blocking:
   218  			// we scan the stacks we can and ask running
   219  			// goroutines to scan themselves; and the
   220  			// second blocks.
   221  			scang(gp, gcw)
   222  
   223  			if selfScan {
   224  				casgstatus(userG, _Gwaiting, _Grunning)
   225  			}
   226  		})
   227  	}
   228  }
   229  
   230  // markrootBlock scans the shard'th shard of the block of memory [b0,
   231  // b0+n0), with the given pointer mask.
   232  //
   233  //go:nowritebarrier
   234  func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
   235  	if rootBlockBytes%(8*sys.PtrSize) != 0 {
   236  		// This is necessary to pick byte offsets in ptrmask0.
   237  		throw("rootBlockBytes must be a multiple of 8*ptrSize")
   238  	}
   239  
   240  	b := b0 + uintptr(shard)*rootBlockBytes
   241  	if b >= b0+n0 {
   242  		return
   243  	}
   244  	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
   245  	n := uintptr(rootBlockBytes)
   246  	if b+n > b0+n0 {
   247  		n = b0 + n0 - b
   248  	}
   249  
   250  	// Scan this shard.
   251  	scanblock(b, n, ptrmask, gcw, nil)
   252  }
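
        // Worked example (hypothetical sizes, 64-bit): for a root of
        // n0 = 600 KiB and shard = 2, b = b0 + 512 KiB and n = 600 KiB
        // - 512 KiB = 88 KiB. Each ptrmask byte covers 8 words, so the
        // shard's mask starts 2 * (rootBlockBytes/(8*8)) = 8192 bytes
        // into ptrmask0.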
   253  
   254  // markrootFreeGStacks frees stacks of dead Gs.
   255  //
   256  // This does not free stacks of dead Gs cached on Ps, but having a few
   257  // cached stacks around isn't a problem.
   258  //
   259  //TODO go:nowritebarrier
   260  func markrootFreeGStacks() {
   261  	// Take list of dead Gs with stacks.
   262  	lock(&sched.gFree.lock)
   263  	list := sched.gFree.stack
   264  	sched.gFree.stack = gList{}
   265  	unlock(&sched.gFree.lock)
   266  	if list.empty() {
   267  		return
   268  	}
   269  
   270  	// Free stacks.
   271  	q := gQueue{list.head, list.head}
   272  	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
   273  		shrinkstack(gp)
   274  		// Manipulate the queue directly since the Gs are
   275  		// already all linked the right way.
   276  		q.tail.set(gp)
   277  	}
   278  
   279  	// Put Gs back on the free list.
   280  	lock(&sched.gFree.lock)
   281  	sched.gFree.noStack.pushAll(q)
   282  	unlock(&sched.gFree.lock)
   283  }
   284  
   285  // markrootSpans marks roots for one shard of work.spans.
   286  //
   287  //go:nowritebarrier
   288  func markrootSpans(gcw *gcWork, shard int) {
   289  	// Objects with finalizers have two GC-related invariants:
   290  	//
   291  	// 1) Everything reachable from the object must be marked.
   292  	// This ensures that when we pass the object to its finalizer,
   293  	// everything the finalizer can reach will be retained.
   294  	//
   295  	// 2) Finalizer specials (which are not in the garbage
   296  	// collected heap) are roots. In practice, this means the fn
   297  	// field must be scanned.
   298  	//
   299  	// TODO(austin): There are several ideas for making this more
   300  	// efficient in issue #11485.
   301  
   302  	sg := mheap_.sweepgen
   303  	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
   304  	// Note that work.spans may not include spans that were
   305  	// allocated between entering the scan phase and now. This is
   306  	// okay because any objects with finalizers in those spans
   307  	// must have been allocated and given finalizers after we
   308  	// entered the scan phase, so addfinalizer will have ensured
   309  	// the above invariants for them.
   310  	for _, s := range spans {
   311  		if s.state != mSpanInUse {
   312  			continue
   313  		}
   314  		// Check that this span was swept (it may be cached or uncached).
   315  		if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
   316  			// sweepgen was updated (+2) during non-checkmark GC pass
   317  			print("sweep ", s.sweepgen, " ", sg, "\n")
   318  			throw("gc: unswept span")
   319  		}
   320  
   321  		// Speculatively check if there are any specials
   322  		// without acquiring the span lock. This may race with
   323  		// adding the first special to a span, but in that
   324  		// case addfinalizer will observe that the GC is
   325  		// active (which is globally synchronized) and ensure
   326  		// the above invariants. We may also ensure the
   327  		// invariants, but it's okay to scan an object twice.
   328  		if s.specials == nil {
   329  			continue
   330  		}
   331  
   332  		// Lock the specials to prevent a special from being
   333  		// removed from the list while we're traversing it.
   334  		lock(&s.speciallock)
   335  
   336  		for sp := s.specials; sp != nil; sp = sp.next {
   337  			if sp.kind != _KindSpecialFinalizer {
   338  				continue
   339  			}
   340  			// don't mark finalized object, but scan it so we
   341  			// retain everything it points to.
   342  			spf := (*specialfinalizer)(unsafe.Pointer(sp))
   343  			// A finalizer can be set for an inner byte of an object; find the object's beginning.
   344  			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
   345  
   346  			// Mark everything that can be reached from
   347  			// the object (but *not* the object itself or
   348  			// we'll never collect it).
   349  			scanobject(p, gcw)
   350  
   351  			// The special itself is a root.
   352  			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
   353  		}
   354  
   355  		unlock(&s.speciallock)
   356  	}
   357  }
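
        // Worked example (hypothetical span layout): for a span with
        // elemsize = 48 and a special at offset = 100, the division
        // above rounds down to p = s.base() + 100/48*48 = s.base() +
        // 96, the start of the third object, so scanobject starts at
        // the object base rather than at the interior byte the
        // finalizer was attached to.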
   358  
   359  // gcAssistAlloc performs GC work to make gp's assist debt positive.
   360  // gp must be the calling user goroutine.
   361  //
   362  // This must be called with preemption enabled.
   363  func gcAssistAlloc(gp *g) {
   364  	// Don't assist in non-preemptible contexts. These are
   365  	// generally fragile and won't allow the assist to block.
   366  	if getg() == gp.m.g0 {
   367  		return
   368  	}
   369  	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
   370  		return
   371  	}
   372  
   373  	traced := false
   374  retry:
   375  	// Compute the amount of scan work we need to do to make the
   376  	// balance positive. When the required amount of work is low,
   377  	// we over-assist to build up credit for future allocations
   378  	// and amortize the cost of assisting.
   379  	debtBytes := -gp.gcAssistBytes
   380  	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
   381  	if scanWork < gcOverAssistWork {
   382  		scanWork = gcOverAssistWork
   383  		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
   384  	}
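
        	// Illustrative numbers (assistWorkPerByte is set by the
        	// pacer each cycle, so these are hypothetical): with
        	// gcAssistBytes = -1<<20 and assistWorkPerByte = 0.5, the
        	// initial scanWork is 512<<10 units; only if that fell
        	// below gcOverAssistWork would the debt be recomputed from
        	// the larger over-assist amount.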
   385  
   386  	// Steal as much credit as we can from the background GC's
   387  	// scan credit. This is racy and may drop the background
   388  	// credit below 0 if two mutators steal at the same time. This
   389  	// will just cause steals to fail until credit is accumulated
   390  	// again, so in the long run it doesn't really matter, but we
   391  	// do have to handle the negative credit case.
   392  	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
   393  	stolen := int64(0)
   394  	if bgScanCredit > 0 {
   395  		if bgScanCredit < scanWork {
   396  			stolen = bgScanCredit
   397  			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
   398  		} else {
   399  			stolen = scanWork
   400  			gp.gcAssistBytes += debtBytes
   401  		}
   402  		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
   403  
   404  		scanWork -= stolen
   405  
   406  		if scanWork == 0 {
   407  			// We were able to steal all of the credit we
   408  			// needed.
   409  			if traced {
   410  				traceGCMarkAssistDone()
   411  			}
   412  			return
   413  		}
   414  	}
   415  
   416  	if trace.enabled && !traced {
   417  		traced = true
   418  		traceGCMarkAssistStart()
   419  	}
   420  
   421  	// Perform assist work
   422  	systemstack(func() {
   423  		gcAssistAlloc1(gp, scanWork)
   424  		// The user stack may have moved, so this can't touch
   425  		// anything on it until it returns from systemstack.
   426  	})
   427  
   428  	completed := gp.param != nil
   429  	gp.param = nil
   430  	if completed {
   431  		gcMarkDone()
   432  	}
   433  
   434  	if gp.gcAssistBytes < 0 {
   435  	// We were unable to steal enough credit or perform
   436  		// enough work to pay off the assist debt. We need to
   437  		// do one of these before letting the mutator allocate
   438  		// more to prevent over-allocation.
   439  		//
   440  		// If this is because we were preempted, reschedule
   441  		// and try some more.
   442  		if gp.preempt {
   443  			Gosched()
   444  			goto retry
   445  		}
   446  
   447  		// Add this G to an assist queue and park. When the GC
   448  		// has more background credit, it will satisfy queued
   449  		// assists before flushing to the global credit pool.
   450  		//
   451  		// Note that this does *not* get woken up when more
   452  		// work is added to the work list. The theory is that
   453  		// there wasn't enough work to do anyway, so we might
   454  		// as well let background marking take care of the
   455  		// work that is available.
   456  		if !gcParkAssist() {
   457  			goto retry
   458  		}
   459  
   460  		// At this point either background GC has satisfied
   461  		// this G's assist debt, or the GC cycle is over.
   462  	}
   463  	if traced {
   464  		traceGCMarkAssistDone()
   465  	}
   466  }
   467  
   468  // gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
   469  // stack. This is a separate function to make it easier to see that
   470  // we're not capturing anything from the user stack, since the user
   471  // stack may move while we're in this function.
   472  //
   473  // gcAssistAlloc1 indicates whether this assist completed the mark
   474  // phase by setting gp.param to non-nil. This can't be communicated on
   475  // the stack since it may move.
   476  //
   477  //go:systemstack
   478  func gcAssistAlloc1(gp *g, scanWork int64) {
   479  	// Clear the flag indicating that this assist completed the
   480  	// mark phase.
   481  	gp.param = nil
   482  
   483  	if atomic.Load(&gcBlackenEnabled) == 0 {
   484  		// The gcBlackenEnabled check in malloc races with the
   485  		// store that clears it but an atomic check in every malloc
   486  		// would be a performance hit.
   487  	// Instead we recheck it here on the non-preemptible system
   488  		// stack to determine if we should perform an assist.
   489  
   490  		// GC is done, so ignore any remaining debt.
   491  		gp.gcAssistBytes = 0
   492  		return
   493  	}
   494  	// Track time spent in this assist. Since we're on the
   495  	// system stack, this is non-preemptible, so we can
   496  	// just measure start and end time.
   497  	startTime := nanotime()
   498  
   499  	decnwait := atomic.Xadd(&work.nwait, -1)
   500  	if decnwait == work.nproc {
   501  		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
   502  		throw("nwait > work.nproc")
   503  	}
   504  
   505  	// gcDrainN requires the caller to be preemptible.
   506  	casgstatus(gp, _Grunning, _Gwaiting)
   507  	gp.waitreason = waitReasonGCAssistMarking
   508  
   509  	// drain own cached work first in the hopes that it
   510  	// will be more cache friendly.
   511  	gcw := &getg().m.p.ptr().gcw
   512  	workDone := gcDrainN(gcw, scanWork)
   513  
   514  	casgstatus(gp, _Gwaiting, _Grunning)
   515  
   516  	// Record that we did this much scan work.
   517  	//
   518  	// Back out the number of bytes of assist credit that
   519  	// this scan work counts for. The "1+" is a poor man's
   520  	// round-up, to ensure this adds credit even if
   521  	// assistBytesPerWork is very low.
   522  	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))
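
        	// Hypothetical example of the round-up: with workDone =
        	// 1000 and assistBytesPerWork = 0.0004, the product
        	// truncates to 0, but the "1+" still credits one byte, so
        	// completed work is never rounded away entirely.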
   523  
   524  	// If this is the last worker and we ran out of work,
   525  	// signal a completion point.
   526  	incnwait := atomic.Xadd(&work.nwait, +1)
   527  	if incnwait > work.nproc {
   528  		println("runtime: work.nwait=", incnwait,
   529  			"work.nproc=", work.nproc)
   530  		throw("work.nwait > work.nproc")
   531  	}
   532  
   533  	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
   534  		// This has reached a background completion point. Set
   535  		// gp.param to a non-nil value to indicate this. It
   536  		// doesn't matter what we set it to (it just has to be
   537  		// a valid pointer).
   538  		gp.param = unsafe.Pointer(gp)
   539  	}
   540  	duration := nanotime() - startTime
   541  	_p_ := gp.m.p.ptr()
   542  	_p_.gcAssistTime += duration
   543  	if _p_.gcAssistTime > gcAssistTimeSlack {
   544  		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
   545  		_p_.gcAssistTime = 0
   546  	}
   547  }
   548  
   549  // gcWakeAllAssists wakes all currently blocked assists. This is used
   550  // at the end of a GC cycle. gcBlackenEnabled must be false to prevent
   551  // new assists from going to sleep after this point.
   552  func gcWakeAllAssists() {
   553  	lock(&work.assistQueue.lock)
   554  	list := work.assistQueue.q.popList()
   555  	injectglist(&list)
   556  	unlock(&work.assistQueue.lock)
   557  }
   558  
   559  // gcParkAssist puts the current goroutine on the assist queue and parks.
   560  //
   561  // gcParkAssist reports whether the assist is now satisfied. If it
   562  // returns false, the caller must retry the assist.
   563  //
   564  //go:nowritebarrier
   565  func gcParkAssist() bool {
   566  	lock(&work.assistQueue.lock)
   567  	// If the GC cycle finished while we were getting the lock,
   568  	// exit the assist. The cycle can't finish while we hold the
   569  	// lock.
   570  	if atomic.Load(&gcBlackenEnabled) == 0 {
   571  		unlock(&work.assistQueue.lock)
   572  		return true
   573  	}
   574  
   575  	gp := getg()
   576  	oldList := work.assistQueue.q
   577  	work.assistQueue.q.pushBack(gp)
   578  
   579  	// Recheck for background credit now that this G is in
   580  	// the queue, but can still back out. This avoids a
   581  	// race in case background marking has flushed more
   582  	// credit since we checked above.
   583  	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
   584  		work.assistQueue.q = oldList
   585  		if oldList.tail != 0 {
   586  			oldList.tail.ptr().schedlink.set(nil)
   587  		}
   588  		unlock(&work.assistQueue.lock)
   589  		return false
   590  	}
   591  	// Park.
   592  	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
   593  	return true
   594  }
   595  
   596  // gcFlushBgCredit flushes scanWork units of background scan work
   597  // credit. This first satisfies blocked assists on the
   598  // work.assistQueue and then flushes any remaining credit to
   599  // gcController.bgScanCredit.
   600  //
   601  // Write barriers are disallowed because this is used by gcDrain after
   602  // it has ensured that all work is drained and this must preserve that
   603  // condition.
   604  //
   605  //go:nowritebarrierrec
   606  func gcFlushBgCredit(scanWork int64) {
   607  	if work.assistQueue.q.empty() {
   608  		// Fast path; there are no blocked assists. There's a
   609  		// small window here where an assist may add itself to
   610  		// the blocked queue and park. If that happens, we'll
   611  		// just get it on the next flush.
   612  		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
   613  		return
   614  	}
   615  
   616  	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)
   617  
   618  	lock(&work.assistQueue.lock)
   619  	for !work.assistQueue.q.empty() && scanBytes > 0 {
   620  		gp := work.assistQueue.q.pop()
   621  		// Note that gp.gcAssistBytes is negative because gp
   622  		// is in debt. Think carefully about the signs below.
   623  		if scanBytes+gp.gcAssistBytes >= 0 {
   624  			// Satisfy this entire assist debt.
   625  			scanBytes += gp.gcAssistBytes
   626  			gp.gcAssistBytes = 0
   627  			// It's important that we *not* put gp in
   628  			// runnext. Otherwise, it's possible for user
   629  			// code to exploit the GC worker's high
   630  			// scheduler priority to get itself always run
   631  			// before other goroutines and always in the
   632  			// fresh quantum started by GC.
   633  			ready(gp, 0, false)
   634  		} else {
   635  			// Partially satisfy this assist.
   636  			gp.gcAssistBytes += scanBytes
   637  			scanBytes = 0
   638  			// As a heuristic, we move this assist to the
   639  			// back of the queue so that large assists
   640  			// can't clog up the assist queue and
   641  			// substantially delay small assists.
   642  			work.assistQueue.q.pushBack(gp)
   643  			break
   644  		}
   645  	}
   646  
   647  	if scanBytes > 0 {
   648  		// Convert from scan bytes back to work.
   649  		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
   650  		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
   651  	}
   652  	unlock(&work.assistQueue.lock)
   653  }
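
        // Illustrative walk-through (hypothetical debts): with
        // scanBytes = 100 and queued assists G1 (gcAssistBytes = -40)
        // and G2 (-80), G1 is satisfied and readied (scanBytes drops
        // to 60), G2 is paid only partially (its debt becomes -20),
        // scanBytes reaches 0, and G2 moves to the back of the queue;
        // nothing is left over to flush to bgScanCredit.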
   654  
   655  // scanstack scans gp's stack, greying all pointers found on the stack.
   656  //
   657  // scanstack is marked go:systemstack because it must not be preempted
   658  // while using a workbuf.
   659  //
   660  //go:nowritebarrier
   661  //go:systemstack
   662  func scanstack(gp *g, gcw *gcWork) {
   663  	if gp.gcscanvalid {
   664  		return
   665  	}
   666  
   667  	if readgstatus(gp)&_Gscan == 0 {
   668  		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
   669  		throw("scanstack - bad status")
   670  	}
   671  
   672  	switch readgstatus(gp) &^ _Gscan {
   673  	default:
   674  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   675  		throw("mark - bad status")
   676  	case _Gdead:
   677  		return
   678  	case _Grunning:
   679  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   680  		throw("scanstack: goroutine not stopped")
   681  	case _Grunnable, _Gsyscall, _Gwaiting:
   682  		// ok
   683  	}
   684  
   685  	if gp == getg() {
   686  		throw("can't scan our own stack")
   687  	}
   688  
   689  	// Shrink the stack if not much of it is being used.
   690  	shrinkstack(gp)
   691  
   692  	var state stackScanState
   693  	state.stack = gp.stack
   694  
   695  	if stackTraceDebug {
   696  		println("stack trace goroutine", gp.goid)
   697  	}
   698  
   699  	// Scan the saved context register. This is effectively a live
   700  	// register that gets moved back and forth between the
   701  	// register and sched.ctxt without a write barrier.
   702  	if gp.sched.ctxt != nil {
   703  		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
   704  	}
   705  
   706  	// Scan the stack. Accumulate a list of stack objects.
   707  	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
   708  		scanframeworker(frame, &state, gcw)
   709  		return true
   710  	}
   711  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
   712  
   713  	// Find additional pointers that point into the stack from the heap.
   714  	// Currently this includes defers and panics. See also function copystack.
   715  	tracebackdefers(gp, scanframe, nil)
   716  	for d := gp._defer; d != nil; d = d.link {
   717  		// tracebackdefers above does not scan the func value, which could
   718  		// be a stack allocated closure. See issue 30453.
   719  		if d.fn != nil {
   720  			scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
   721  		}
   722  	}
   723  	if gp._panic != nil {
   724  		state.putPtr(uintptr(unsafe.Pointer(gp._panic)))
   725  	}
   726  
   727  	// Find and scan all reachable stack objects.
   728  	state.buildIndex()
   729  	for {
   730  		p := state.getPtr()
   731  		if p == 0 {
   732  			break
   733  		}
   734  		obj := state.findObject(p)
   735  		if obj == nil {
   736  			continue
   737  		}
   738  		t := obj.typ
   739  		if t == nil {
   740  			// We've already scanned this object.
   741  			continue
   742  		}
   743  		obj.setType(nil) // Don't scan it again.
   744  		if stackTraceDebug {
   745  			println("  live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
   746  		}
   747  		gcdata := t.gcdata
   748  		var s *mspan
   749  		if t.kind&kindGCProg != 0 {
   750  			// This path is pretty unlikely, an object large enough
   751  			// to have a GC program allocated on the stack.
   752  			// We need some space to unpack the program into a straight
   753  			// bitmask, which we allocate/free here.
   754  			// TODO: it would be nice if there were a way to run a GC
   755  			// program without having to store all its bits. We'd have
   756  			// to change from a Lempel-Ziv style program to something else.
   757  			// Or we can forbid putting objects on stacks if they require
   758  			// a gc program (see issue 27447).
   759  			s = materializeGCProg(t.ptrdata, gcdata)
   760  			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   761  		}
   762  
   763  		scanblock(state.stack.lo+uintptr(obj.off), t.ptrdata, gcdata, gcw, &state)
   764  
   765  		if s != nil {
   766  			dematerializeGCProg(s)
   767  		}
   768  	}
   769  
   770  	// Deallocate object buffers.
   771  	// (Pointer buffers were all deallocated in the loop above.)
   772  	for state.head != nil {
   773  		x := state.head
   774  		state.head = x.next
   775  		if stackTraceDebug {
   776  			for _, obj := range x.obj[:x.nobj] {
   777  				if obj.typ == nil { // reachable
   778  					continue
   779  				}
   780  				println("  dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string())
   781  				// Note: not necessarily really dead - only reachable-from-ptr dead.
   782  			}
   783  		}
   784  		x.nobj = 0
   785  		putempty((*workbuf)(unsafe.Pointer(x)))
   786  	}
   787  	if state.buf != nil || state.freeBuf != nil {
   788  		throw("remaining pointer buffers")
   789  	}
   790  
   791  	gp.gcscanvalid = true
   792  }
   793  
   794  // Scan a stack frame: local variables and function arguments/results.
   795  //go:nowritebarrier
   796  func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
   797  	if _DebugGC > 1 && frame.continpc != 0 {
   798  		print("scanframe ", funcname(frame.fn), "\n")
   799  	}
   800  
   801  	locals, args, objs := getStackMap(frame, &state.cache, false)
   802  
   803  	// Scan local variables if stack frame has been allocated.
   804  	if locals.n > 0 {
   805  		size := uintptr(locals.n) * sys.PtrSize
   806  		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
   807  	}
   808  
   809  	// Scan arguments.
   810  	if args.n > 0 {
   811  		scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
   812  	}
   813  
   814  	// Add all stack objects to the stack object list.
   815  	if frame.varp != 0 {
   816  		// varp is 0 for defers, where there are no locals.
   817  		// In that case, there can't be a pointer to its args, either.
   818  		// (And all args would be scanned above anyway.)
   819  		for _, obj := range objs {
   820  			off := obj.off
   821  			base := frame.varp // locals base pointer
   822  			if off >= 0 {
   823  				base = frame.argp // arguments and return values base pointer
   824  			}
   825  			ptr := base + uintptr(off)
   826  			if ptr < frame.sp {
   827  				// object hasn't been allocated in the frame yet.
   828  				continue
   829  			}
   830  			if stackTraceDebug {
   831  				println("stkobj at", hex(ptr), "of type", obj.typ.string())
   832  			}
   833  			state.addObject(ptr, obj.typ)
   834  		}
   835  	}
   836  }
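
        // Sign convention, illustrated with hypothetical offsets: a
        // stack object with obj.off = -24 lives at frame.varp - 24 (a
        // local), while one with obj.off = 16 lives at frame.argp + 16
        // (an argument or result); either is skipped while its address
        // is still below frame.sp.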
   837  
   838  type gcDrainFlags int
   839  
   840  const (
   841  	gcDrainUntilPreempt gcDrainFlags = 1 << iota
   842  	gcDrainFlushBgCredit
   843  	gcDrainIdle
   844  	gcDrainFractional
   845  )
   846  
   847  // gcDrain scans roots and objects in work buffers, blackening grey
   848  // objects until it is unable to get more work. It may return before
   849  // GC is done; it's the caller's responsibility to balance work from
   850  // other Ps.
   851  //
   852  // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
   853  // is set.
   854  //
   855  // If flags&gcDrainIdle != 0, gcDrain returns when there is other work
   856  // to do.
   857  //
   858  // If flags&gcDrainFractional != 0, gcDrain self-preempts when
   859  // pollFractionalWorkerExit() returns true, as used by
   860  // fractional mark workers.
   861  //
   862  // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
   863  // credit to gcController.bgScanCredit every gcCreditSlack units of
   864  // scan work.
   865  //
   866  //go:nowritebarrier
   867  func gcDrain(gcw *gcWork, flags gcDrainFlags) {
   868  	if !writeBarrier.needed {
   869  		throw("gcDrain phase incorrect")
   870  	}
   871  
   872  	gp := getg().m.curg
   873  	preemptible := flags&gcDrainUntilPreempt != 0
   874  	flushBgCredit := flags&gcDrainFlushBgCredit != 0
   875  	idle := flags&gcDrainIdle != 0
   876  
   877  	initScanWork := gcw.scanWork
   878  
   879  	// checkWork is the scan work before performing the next
   880  	// self-preempt check.
   881  	checkWork := int64(1<<63 - 1)
   882  	var check func() bool
   883  	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
   884  		checkWork = initScanWork + drainCheckThreshold
   885  		if idle {
   886  			check = pollWork
   887  		} else if flags&gcDrainFractional != 0 {
   888  			check = pollFractionalWorkerExit
   889  		}
   890  	}
   891  
   892  	// Drain root marking jobs.
   893  	if work.markrootNext < work.markrootJobs {
   894  		for !(preemptible && gp.preempt) {
   895  			job := atomic.Xadd(&work.markrootNext, +1) - 1
   896  			if job >= work.markrootJobs {
   897  				break
   898  			}
   899  			markroot(gcw, job)
   900  			if check != nil && check() {
   901  				goto done
   902  			}
   903  		}
   904  	}
   905  
   906  	// Drain heap marking jobs.
   907  	for !(preemptible && gp.preempt) {
   908  		// Try to keep work available on the global queue. We used to
   909  		// check if there were waiting workers, but it's better to
   910  		// just keep work available than to make workers wait. In the
   911  		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
   912  		// balances.
   913  		if work.full == 0 {
   914  			gcw.balance()
   915  		}
   916  
   917  		b := gcw.tryGetFast()
   918  		if b == 0 {
   919  			b = gcw.tryGet()
   920  			if b == 0 {
   921  				// Flush the write barrier
   922  				// buffer; this may create
   923  				// more work.
   924  				wbBufFlush(nil, 0)
   925  				b = gcw.tryGet()
   926  			}
   927  		}
   928  		if b == 0 {
   929  			// Unable to get work.
   930  			break
   931  		}
   932  		scanobject(b, gcw)
   933  
   934  		// Flush background scan work credit to the global
   935  		// account if we've accumulated enough locally so
   936  		// mutator assists can draw on it.
   937  		if gcw.scanWork >= gcCreditSlack {
   938  			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
   939  			if flushBgCredit {
   940  				gcFlushBgCredit(gcw.scanWork - initScanWork)
   941  				initScanWork = 0
   942  			}
   943  			checkWork -= gcw.scanWork
   944  			gcw.scanWork = 0
   945  
   946  			if checkWork <= 0 {
   947  				checkWork += drainCheckThreshold
   948  				if check != nil && check() {
   949  					break
   950  				}
   951  			}
   952  		}
   953  	}
   954  
   955  done:
   956  	// Flush remaining scan work credit.
   957  	if gcw.scanWork > 0 {
   958  		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
   959  		if flushBgCredit {
   960  			gcFlushBgCredit(gcw.scanWork - initScanWork)
   961  		}
   962  		gcw.scanWork = 0
   963  	}
   964  }
   965  
   966  // gcDrainN blackens grey objects until it has performed roughly
   967  // scanWork units of scan work or the G is preempted. This is
   968  // best-effort, so it may perform less work if it fails to get a work
   969  // buffer. Otherwise, it will perform at least scanWork units of work, but
   970  // may perform more because scanning is always done in whole object
   971  // increments. It returns the amount of scan work performed.
   972  //
   973  // The caller goroutine must be in a preemptible state (e.g.,
   974  // _Gwaiting) to prevent deadlocks during stack scanning. As a
   975  // consequence, this must be called on the system stack.
   976  //
   977  //go:nowritebarrier
   978  //go:systemstack
   979  func gcDrainN(gcw *gcWork, scanWork int64) int64 {
   980  	if !writeBarrier.needed {
   981  		throw("gcDrainN phase incorrect")
   982  	}
   983  
   984  	// There may already be scan work on the gcw, which we don't
   985  	// want to claim was done by this call.
   986  	workFlushed := -gcw.scanWork
   987  
   988  	gp := getg().m.curg
   989  	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
   990  		// See gcDrain comment.
   991  		if work.full == 0 {
   992  			gcw.balance()
   993  		}
   994  
   995  		// This might be a good place to add prefetch code...
   996  		// if(wbuf.nobj > 4) {
   997  		//         PREFETCH(wbuf->obj[wbuf.nobj - 3]);
   998  		//  }
   999  		//
  1000  		b := gcw.tryGetFast()
  1001  		if b == 0 {
  1002  			b = gcw.tryGet()
  1003  			if b == 0 {
  1004  				// Flush the write barrier buffer;
  1005  				// this may create more work.
  1006  				wbBufFlush(nil, 0)
  1007  				b = gcw.tryGet()
  1008  			}
  1009  		}
  1010  
  1011  		if b == 0 {
  1012  			// Try to do a root job.
  1013  			//
  1014  			// TODO: Assists should get credit for this
  1015  			// work.
  1016  			if work.markrootNext < work.markrootJobs {
  1017  				job := atomic.Xadd(&work.markrootNext, +1) - 1
  1018  				if job < work.markrootJobs {
  1019  					markroot(gcw, job)
  1020  					continue
  1021  				}
  1022  			}
  1023  			// No heap or root jobs.
  1024  			break
  1025  		}
  1026  		scanobject(b, gcw)
  1027  
  1028  		// Flush background scan work credit.
  1029  		if gcw.scanWork >= gcCreditSlack {
  1030  			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1031  			workFlushed += gcw.scanWork
  1032  			gcw.scanWork = 0
  1033  		}
  1034  	}
  1035  
  1036  	// Unlike gcDrain, there's no need to flush remaining work
  1037  	// here because this never flushes to bgScanCredit and
  1038  	// gcw.dispose will flush any remaining work to scanWork.
  1039  
  1040  	return workFlushed + gcw.scanWork
  1041  }
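
        // Accounting sketch (hypothetical numbers): if gcw.scanWork is
        // 500 on entry, workFlushed starts at -500, so a call that
        // scans 1600 units returns 1600 even though the first flush of
        // the gcw also carries the pre-existing 500 units into
        // gcController.scanWork.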
  1042  
  1043  // scanblock scans b as scanobject would, but using an explicit
  1044  // pointer bitmap instead of the heap bitmap.
  1045  //
  1046  // This is used to scan non-heap roots, so it does not update
  1047  // gcw.bytesMarked or gcw.scanWork.
  1048  //
  1049  // If stk != nil, possible stack pointers are also reported to stk.putPtr.
  1050  //go:nowritebarrier
  1051  func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
  1052  	// Use local copies of original parameters, so that a stack trace
  1053  	// due to one of the throws below shows the original block
  1054  	// base and extent.
  1055  	b := b0
  1056  	n := n0
  1057  
  1058  	for i := uintptr(0); i < n; {
  1059  		// Find bits for the next word.
  1060  		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
  1061  		if bits == 0 {
  1062  			i += sys.PtrSize * 8
  1063  			continue
  1064  		}
  1065  		for j := 0; j < 8 && i < n; j++ {
  1066  			if bits&1 != 0 {
  1067  				// Same work as in scanobject; see comments there.
  1068  				p := *(*uintptr)(unsafe.Pointer(b + i))
  1069  				if p != 0 {
  1070  					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
  1071  						greyobject(obj, b, i, span, gcw, objIndex)
  1072  					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
  1073  						stk.putPtr(p)
  1074  					}
  1075  				}
  1076  			}
  1077  			bits >>= 1
  1078  			i += sys.PtrSize
  1079  		}
  1080  	}
  1081  }
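
        // Bitmap walk, illustrated (64-bit): each ptrmask byte covers
        // 8 words (64 bytes), so an all-zero byte advances i by 64 in
        // one step; a hypothetical byte 0b00000101 loads the words at
        // b+i and b+i+16 as candidate pointers and skips the rest.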
  1082  
  1083  // scanobject scans the object starting at b, adding pointers to gcw.
  1084  // b must point to the beginning of a heap object or an oblet.
  1085  // scanobject consults the GC bitmap for the pointer mask and the
  1086  // spans for the size of the object.
  1087  //
  1088  //go:nowritebarrier
  1089  func scanobject(b uintptr, gcw *gcWork) {
  1090  	// Find the bits for b and the size of the object at b.
  1091  	//
  1092  	// b is either the beginning of an object, in which case this
  1093  	// is the size of the object to scan, or it points to an
  1094  	// oblet, in which case we compute the size to scan below.
  1095  	hbits := heapBitsForAddr(b)
  1096  	s := spanOfUnchecked(b)
  1097  	n := s.elemsize
  1098  	if n == 0 {
  1099  		throw("scanobject n == 0")
  1100  	}
  1101  
  1102  	if n > maxObletBytes {
  1103  		// Large object. Break into oblets for better
  1104  		// parallelism and lower latency.
  1105  		if b == s.base() {
  1106  			// It's possible this is a noscan object (not
  1107  			// from greyobject, but from other code
  1108  			// paths), in which case we must *not* enqueue
  1109  			// oblets since their bitmaps will be
  1110  			// uninitialized.
  1111  			if s.spanclass.noscan() {
  1112  				// Bypass the whole scan.
  1113  				gcw.bytesMarked += uint64(n)
  1114  				return
  1115  			}
  1116  
  1117  			// Enqueue the other oblets to scan later.
  1118  			// Some oblets may be in b's scalar tail, but
  1119  			// these will be marked as "no more pointers",
  1120  			// so we'll drop out immediately when we go to
  1121  			// scan those.
  1122  			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
  1123  				if !gcw.putFast(oblet) {
  1124  					gcw.put(oblet)
  1125  				}
  1126  			}
  1127  		}
  1128  
  1129  		// Compute the size of the oblet. Since this object
  1130  		// must be a large object, s.base() is the beginning
  1131  		// of the object.
  1132  		n = s.base() + s.elemsize - b
  1133  		if n > maxObletBytes {
  1134  			n = maxObletBytes
  1135  		}
  1136  	}
  1137  
  1138  	var i uintptr
  1139  	for i = 0; i < n; i += sys.PtrSize {
  1140  		// Find bits for this word.
  1141  		if i != 0 {
  1142  			// Avoid needless hbits.next() on last iteration.
  1143  			hbits = hbits.next()
  1144  		}
  1145  		// Load bits once. See CL 22712 and issue 16973 for discussion.
  1146  		bits := hbits.bits()
  1147  		// During checkmarking, 1-word objects store the checkmark
  1148  		// in the type bit for the one word. The only one-word objects
  1149  		// are pointers, or else they'd be merged with other non-pointer
  1150  		// data into larger allocations.
  1151  		if i != 1*sys.PtrSize && bits&bitScan == 0 {
  1152  			break // no more pointers in this object
  1153  		}
  1154  		if bits&bitPointer == 0 {
  1155  			continue // not a pointer
  1156  		}
  1157  
  1158  		// Work here is duplicated in scanblock and above.
  1159  		// If you make changes here, make changes there too.
  1160  		obj := *(*uintptr)(unsafe.Pointer(b + i))
  1161  
  1162  		// At this point we have extracted the next potential pointer.
  1163  		// Quickly filter out nil and pointers back to the current object.
  1164  		if obj != 0 && obj-b >= n {
  1165  			// Test if obj points into the Go heap and, if so,
  1166  			// mark the object.
  1167  			//
  1168  			// Note that it's possible for findObject to
  1169  			// fail if obj points to a just-allocated heap
  1170  			// object because of a race with growing the
  1171  			// heap. In this case, we know the object was
  1172  			// just allocated and hence will be marked by
  1173  			// allocation itself.
  1174  			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
  1175  				greyobject(obj, b, i, span, gcw, objIndex)
  1176  			}
  1177  		}
  1178  	}
  1179  	gcw.bytesMarked += uint64(n)
  1180  	gcw.scanWork += int64(i)
  1181  }
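
        // Oblet math, illustrated (hypothetical 1 MiB object): when
        // b == s.base() and s.elemsize = 1<<20, the loop above
        // enqueues seven oblets at b+128K, b+256K, ..., b+896K, and
        // this call itself scans only n = maxObletBytes; a later call
        // on the oblet at b+896K computes n = s.base() + 1<<20 - b =
        // 128 KiB.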
  1182  
  1183  // Shade the object if it isn't already.
  1184  // The object is not nil and known to be in the heap.
  1185  // Preemption must be disabled.
  1186  //go:nowritebarrier
  1187  func shade(b uintptr) {
  1188  	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
  1189  		gcw := &getg().m.p.ptr().gcw
  1190  		greyobject(obj, 0, 0, span, gcw, objIndex)
  1191  	}
  1192  }
  1193  
  1194  // obj is the start of an object with mark mbits.
  1195  // If it isn't already marked, mark it and enqueue into gcw.
  1196  // base and off are for debugging only and could be removed.
  1197  //
  1198  // See also wbBufFlush1, which partially duplicates this logic.
  1199  //
  1200  //go:nowritebarrierrec
  1201  func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
  1202  	// obj should be start of allocation, and so must be at least pointer-aligned.
  1203  	if obj&(sys.PtrSize-1) != 0 {
  1204  		throw("greyobject: obj not pointer-aligned")
  1205  	}
  1206  	mbits := span.markBitsForIndex(objIndex)
  1207  
  1208  	if useCheckmark {
  1209  		if !mbits.isMarked() {
  1210  			printlock()
  1211  			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
  1212  			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
  1213  
  1214  			// Dump the source (base) object
  1215  			gcDumpObject("base", base, off)
  1216  
  1217  			// Dump the object
  1218  			gcDumpObject("obj", obj, ^uintptr(0))
  1219  
  1220  			getg().m.traceback = 2
  1221  			throw("checkmark found unmarked object")
  1222  		}
  1223  		hbits := heapBitsForAddr(obj)
  1224  		if hbits.isCheckmarked(span.elemsize) {
  1225  			return
  1226  		}
  1227  		hbits.setCheckmarked(span.elemsize)
  1228  		if !hbits.isCheckmarked(span.elemsize) {
  1229  			throw("setCheckmarked and isCheckmarked disagree")
  1230  		}
  1231  	} else {
  1232  		if debug.gccheckmark > 0 && span.isFree(objIndex) {
  1233  			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
  1234  			gcDumpObject("base", base, off)
  1235  			gcDumpObject("obj", obj, ^uintptr(0))
  1236  			getg().m.traceback = 2
  1237  			throw("marking free object")
  1238  		}
  1239  
  1240  		// If marked we have nothing to do.
  1241  		if mbits.isMarked() {
  1242  			return
  1243  		}
  1244  		mbits.setMarked()
  1245  
  1246  		// Mark span.
  1247  		arena, pageIdx, pageMask := pageIndexOf(span.base())
  1248  		if arena.pageMarks[pageIdx]&pageMask == 0 {
  1249  			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
  1250  		}
  1251  
  1252  		// If this is a noscan object, fast-track it to black
  1253  		// instead of greying it.
  1254  		if span.spanclass.noscan() {
  1255  			gcw.bytesMarked += uint64(span.elemsize)
  1256  			return
  1257  		}
  1258  	}
  1259  
  1260  	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
  1261  	// seems like a nice optimization that can be added back in.
  1262  	// There needs to be time between the PREFETCH and the use.
  1263  	// Previously we put the obj in an 8 element buffer that is drained at a rate
  1264  	// to give the PREFETCH time to do its work.
  1265  	// Use of PREFETCHNTA might be more appropriate than PREFETCH
  1266  	if !gcw.putFast(obj) {
  1267  		gcw.put(obj)
  1268  	}
  1269  }
  1270  
  1271  // gcDumpObject dumps the contents of obj for debugging and marks the
  1272  // field at byte offset off in obj.
  1273  func gcDumpObject(label string, obj, off uintptr) {
  1274  	s := spanOf(obj)
  1275  	print(label, "=", hex(obj))
  1276  	if s == nil {
  1277  		print(" s=nil\n")
  1278  		return
  1279  	}
  1280  	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
  1281  	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
  1282  		print(mSpanStateNames[s.state], "\n")
  1283  	} else {
  1284  		print("unknown(", s.state, ")\n")
  1285  	}
  1286  
  1287  	skipped := false
  1288  	size := s.elemsize
  1289  	if s.state == mSpanManual && size == 0 {
  1290  		// We're printing something from a stack frame. We
  1291  		// don't know how big it is, so just show up to an
  1292  		// don't know how big it is, so just show up to and
  1293  		// including off.
  1294  	}
  1295  	for i := uintptr(0); i < size; i += sys.PtrSize {
  1296  		// For big objects, just print the beginning (because
  1297  		// that usually hints at the object's type) and the
  1298  		// fields around off.
  1299  		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
  1300  			skipped = true
  1301  			continue
  1302  		}
  1303  		if skipped {
  1304  			print(" ...\n")
  1305  			skipped = false
  1306  		}
  1307  		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
  1308  		if i == off {
  1309  			print(" <==")
  1310  		}
  1311  		print("\n")
  1312  	}
  1313  	if skipped {
  1314  		print(" ...\n")
  1315  	}
  1316  }
  1317  
  1318  // gcmarknewobject marks a newly allocated object black. obj must
  1319  // not contain any non-nil pointers.
  1320  //
  1321  // This is nosplit so it can manipulate a gcWork without preemption.
  1322  //
  1323  //go:nowritebarrier
  1324  //go:nosplit
  1325  func gcmarknewobject(obj, size, scanSize uintptr) {
  1326  	if useCheckmark { // The world should be stopped so this should not happen.
  1327  		throw("gcmarknewobject called while doing checkmark")
  1328  	}
  1329  	markBitsForAddr(obj).setMarked()
  1330  	gcw := &getg().m.p.ptr().gcw
  1331  	gcw.bytesMarked += uint64(size)
  1332  	gcw.scanWork += int64(scanSize)
  1333  }
  1334  
  1335  // gcMarkTinyAllocs greys all active tiny alloc blocks.
  1336  //
  1337  // The world must be stopped.
  1338  func gcMarkTinyAllocs() {
  1339  	for _, p := range allp {
  1340  		c := p.mcache
  1341  		if c == nil || c.tiny == 0 {
  1342  			continue
  1343  		}
  1344  		_, span, objIndex := findObject(c.tiny, 0, 0)
  1345  		gcw := &p.gcw
  1346  		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
  1347  	}
  1348  }
  1349  
  1350  // Checkmarking
  1351  
  1352  // To help debug the concurrent GC we remark with the world
  1353  // stopped, ensuring that any object encountered has its normal
  1354  // mark bit set. To do this we use an orthogonal bit
  1355  // pattern to indicate the object is marked. The following pattern
  1356  // uses the upper two bits in the object's boundary nibble.
  1357  // 01: scalar  not marked
  1358  // 10: pointer not marked
  1359  // 11: pointer     marked
  1360  // 00: scalar      marked
  1361  // XORing with 01 will flip the pattern from marked to unmarked and vice versa.
  1362  // The higher bit is 1 for pointers and 0 for scalars, whether the object
  1363  // is marked or not.
  1364  // The first nibble no longer holds the typeDead pattern indicating that
  1365  // there are no more pointers in the object. This information is held
  1366  // in the second nibble.
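
        // For instance, a nibble reading 10 (pointer, not marked)
        // becomes 11 (pointer, marked) after XOR with 01, and 01
        // (scalar, not marked) becomes 00 (scalar, marked); the high
        // bit is untouched, so pointer-ness survives checkmarking.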
  1367  
  1368  // If useCheckmark is true, marking of an object uses the
  1369  // checkmark bits (encoding above) instead of the standard
  1370  // mark bits.
  1371  var useCheckmark = false
  1372  
  1373  //go:nowritebarrier
  1374  func initCheckmarks() {
  1375  	useCheckmark = true
  1376  	for _, s := range mheap_.allspans {
  1377  		if s.state == mSpanInUse {
  1378  			heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
  1379  		}
  1380  	}
  1381  }
  1382  
  1383  func clearCheckmarks() {
  1384  	useCheckmark = false
  1385  	for _, s := range mheap_.allspans {
  1386  		if s.state == mSpanInUse {
  1387  			heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
  1388  		}
  1389  	}
  1390  }
  1391  
