Source file src/runtime/mgcmark.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: marking and scanning
     6  
     7  package runtime
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"runtime/internal/sys"
    12  	"unsafe"
    13  )
    14  
    15  const (
    16  	fixedRootFinalizers = iota
    17  	fixedRootFreeGStacks
    18  	fixedRootCount
    19  
    20  	// rootBlockBytes is the number of bytes to scan per data or
    21  	// BSS root.
    22  	rootBlockBytes = 256 << 10
    23  
    24  	// maxObletBytes is the maximum bytes of an object to scan at
    25  	// once. Larger objects will be split up into "oblets" of at
    26  	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
    27  	// scan preemption at ~100 µs.
    28  	//
    29  	// This must be > _MaxSmallSize so that the object base is the
    30  	// span base.
    31  	maxObletBytes = 128 << 10
    32  
    33  	// drainCheckThreshold specifies how many units of work to do
    34  	// between self-preemption checks in gcDrain. Assuming a scan
    35  	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
    36  	// overhead in the scan loop (the scheduler check may perform
    37  	// a syscall, so its overhead is nontrivial). Higher values
    38  	// make the system less responsive to incoming work.
    39  	drainCheckThreshold = 100000
    40  
    41  	// pagesPerSpanRoot indicates how many pages to scan from a span root
    42  	// at a time. Used by special root marking.
    43  	//
    44  	// Higher values improve throughput by increasing locality, but
    45  	// increase the minimum latency of a marking operation.
    46  	//
    47  	// Must be a multiple of the pageInUse bitmap element size and
    48  	// must also evenly divide pagesPerArena.
    49  	pagesPerSpanRoot = 512
    50  
    51  	// go115NewMarkrootSpans is a feature flag that indicates whether
    52  	// to use the new bitmap-based markrootSpans implementation.
    53  	go115NewMarkrootSpans = true
    54  )
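
The constants above trade throughput against latency. As a rough illustration (a standalone sketch, not runtime code), an object of size n is split into ceil(n/maxObletBytes) oblets, and at the comment's assumed scan rate of ~1 MB/ms each oblet bounds preemption latency at roughly 125 µs:

func obletLatencySketch(n uintptr) (oblets int, worstCaseMs float64) {
	const maxObletBytes = 128 << 10
	const scanRateBytesPerMs = 1 << 20 // assumed ~1 MB/ms, per the comment above
	oblets = int((n + maxObletBytes - 1) / maxObletBytes) // ceiling division
	worstCaseMs = float64(maxObletBytes) / float64(scanRateBytesPerMs)
	return
}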
    55  
    56  // gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
    57  // some miscellany) and initializes scanning-related state.
    58  //
    59  // The world must be stopped.
    60  func gcMarkRootPrepare() {
    61  	work.nFlushCacheRoots = 0
    62  
    63  	// Compute how many data and BSS root blocks there are.
    64  	nBlocks := func(bytes uintptr) int {
    65  		return int(divRoundUp(bytes, rootBlockBytes))
    66  	}
    67  
    68  	work.nDataRoots = 0
    69  	work.nBSSRoots = 0
    70  
    71  	// Scan globals.
    72  	for _, datap := range activeModules() {
    73  		nDataRoots := nBlocks(datap.edata - datap.data)
    74  		if nDataRoots > work.nDataRoots {
    75  			work.nDataRoots = nDataRoots
    76  		}
    77  	}
    78  
    79  	for _, datap := range activeModules() {
    80  		nBSSRoots := nBlocks(datap.ebss - datap.bss)
    81  		if nBSSRoots > work.nBSSRoots {
    82  			work.nBSSRoots = nBSSRoots
    83  		}
    84  	}
    85  
    86  	// Scan span roots for finalizer specials.
    87  	//
    88  	// We depend on addfinalizer to mark objects that get
    89  	// finalizers after root marking.
    90  	if go115NewMarkrootSpans {
    91  		// We're going to scan the whole heap (that was available at the time the
    92  		// mark phase started, i.e. markArenas) for in-use spans which have specials.
    93  		//
    94  		// Break up the work into arenas, and further into chunks.
    95  		//
    96  		// Snapshot allArenas as markArenas. This snapshot is safe because allArenas
    97  		// is append-only.
    98  		mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
    99  		work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
   100  	} else {
   101  		// We're only interested in scanning the in-use spans,
   102  		// which will all be swept at this point. More spans
   103  		// may be added to this list during concurrent GC, but
   104  		// we only care about spans that were allocated before
   105  		// this mark phase.
   106  		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
   107  	}
   108  
   109  	// Scan stacks.
   110  	//
   111  	// Gs may be created after this point, but it's okay that we
   112  	// ignore them because they begin life without any roots, so
   113  	// there's nothing to scan, and any roots they create during
   114  	// the concurrent phase will be scanned during mark
   115  	// termination.
   116  	work.nStackRoots = int(atomic.Loaduintptr(&allglen))
   117  
   118  	work.markrootNext = 0
   119  	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
   120  }
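
Note that a single data (or BSS) root job index is applied to every module in markroot, so the job count is the maximum block count across modules, not the sum. A standalone sketch of that computation, with divRoundUp inlined:

func nDataRootsSketch(moduleDataBytes []uintptr) int {
	const rootBlockBytes = 256 << 10
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}
	nRoots := 0
	for _, bytes := range moduleDataBytes {
		if n := nBlocks(bytes); n > nRoots {
			nRoots = n // one shared job index covers block i of every module
		}
	}
	return nRoots
}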
   121  
   122  // gcMarkRootCheck checks that all roots have been scanned. It is
   123  // purely for debugging.
   124  func gcMarkRootCheck() {
   125  	if work.markrootNext < work.markrootJobs {
   126  		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
   127  		throw("left over markroot jobs")
   128  	}
   129  
   130  	lock(&allglock)
   131  	// Check that stacks have been scanned.
   132  	var gp *g
   133  	for i := 0; i < work.nStackRoots; i++ {
   134  		gp = allgs[i]
   135  		if !gp.gcscandone {
   136  			goto fail
   137  		}
   138  	}
   139  	unlock(&allglock)
   140  	return
   141  
   142  fail:
   143  	println("gp", gp, "goid", gp.goid,
   144  		"status", readgstatus(gp),
   145  		"gcscandone", gp.gcscandone)
   146  	unlock(&allglock) // Avoid self-deadlock with traceback.
   147  	throw("scan missed a g")
   148  }
   149  
   150  // ptrmask for an allocation containing a single pointer.
   151  var oneptrmask = [...]uint8{1}
   152  
   153  // markroot scans the i'th root.
   154  //
   155  // Preemption must be disabled (because this uses a gcWork).
   156  //
   157  // nowritebarrier is only advisory here.
   158  //
   159  //go:nowritebarrier
   160  func markroot(gcw *gcWork, i uint32) {
   161  	// TODO(austin): This is a bit ridiculous. Compute and store
   162  	// the bases in gcMarkRootPrepare instead of the counts.
   163  	baseFlushCache := uint32(fixedRootCount)
   164  	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
   165  	baseBSS := baseData + uint32(work.nDataRoots)
   166  	baseSpans := baseBSS + uint32(work.nBSSRoots)
   167  	baseStacks := baseSpans + uint32(work.nSpanRoots)
   168  	end := baseStacks + uint32(work.nStackRoots)
   169  
   170  	// Note: if you add a case here, please also update heapdump.go:dumproots.
   171  	switch {
   172  	case baseFlushCache <= i && i < baseData:
   173  		flushmcache(int(i - baseFlushCache))
   174  
   175  	case baseData <= i && i < baseBSS:
   176  		for _, datap := range activeModules() {
   177  			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
   178  		}
   179  
   180  	case baseBSS <= i && i < baseSpans:
   181  		for _, datap := range activeModules() {
   182  			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
   183  		}
   184  
   185  	case i == fixedRootFinalizers:
   186  		for fb := allfin; fb != nil; fb = fb.alllink {
   187  			cnt := uintptr(atomic.Load(&fb.cnt))
   188  			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
   189  		}
   190  
   191  	case i == fixedRootFreeGStacks:
   192  		// Switch to the system stack so we can call
   193  		// stackfree.
   194  		systemstack(markrootFreeGStacks)
   195  
   196  	case baseSpans <= i && i < baseStacks:
   197  		// mark mspan.specials
   198  		markrootSpans(gcw, int(i-baseSpans))
   199  
   200  	default:
   201  		// the rest is scanning goroutine stacks
   202  		var gp *g
   203  		if baseStacks <= i && i < end {
   204  			gp = allgs[i-baseStacks]
   205  		} else {
   206  			throw("markroot: bad index")
   207  		}
   208  
    209  		// Remember when we first observed the G blocked;
    210  		// this is needed only for traceback output.
   211  		status := readgstatus(gp) // We are not in a scan state
   212  		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
   213  			gp.waitsince = work.tstart
   214  		}
   215  
   216  		// scanstack must be done on the system stack in case
   217  		// we're trying to scan our own stack.
   218  		systemstack(func() {
   219  			// If this is a self-scan, put the user G in
   220  			// _Gwaiting to prevent self-deadlock. It may
   221  			// already be in _Gwaiting if this is a mark
   222  			// worker or we're in mark termination.
   223  			userG := getg().m.curg
   224  			selfScan := gp == userG && readgstatus(userG) == _Grunning
   225  			if selfScan {
   226  				casgstatus(userG, _Grunning, _Gwaiting)
   227  				userG.waitreason = waitReasonGarbageCollectionScan
   228  			}
   229  
   230  			// TODO: suspendG blocks (and spins) until gp
   231  			// stops, which may take a while for
   232  			// running goroutines. Consider doing this in
   233  			// two phases where the first is non-blocking:
   234  			// we scan the stacks we can and ask running
   235  			// goroutines to scan themselves; and the
   236  			// second blocks.
   237  			stopped := suspendG(gp)
   238  			if stopped.dead {
   239  				gp.gcscandone = true
   240  				return
   241  			}
   242  			if gp.gcscandone {
   243  				throw("g already scanned")
   244  			}
   245  			scanstack(gp, gcw)
   246  			gp.gcscandone = true
   247  			resumeG(stopped)
   248  
   249  			if selfScan {
   250  				casgstatus(userG, _Gwaiting, _Grunning)
   251  			}
   252  		})
   253  	}
   254  }
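
The switch above decodes a flat job index by comparing it against cumulative base offsets. A self-contained sketch of the same arithmetic (the struct and function are illustrative stand-ins, not runtime API):

type rootCountsSketch struct {
	flushCache, data, bss, spans, stacks int
}

func rootKindSketch(c rootCountsSketch, i int) string {
	const fixedRootCount = 2 // finalizers + free G stacks
	baseFlushCache := fixedRootCount
	baseData := baseFlushCache + c.flushCache
	baseBSS := baseData + c.data
	baseSpans := baseBSS + c.bss
	baseStacks := baseSpans + c.spans
	end := baseStacks + c.stacks
	switch {
	case i < baseFlushCache:
		return "fixed root (finalizers or free G stacks)"
	case i < baseData:
		return "flush mcache"
	case i < baseBSS:
		return "data segment block"
	case i < baseSpans:
		return "BSS segment block"
	case i < baseStacks:
		return "span specials shard"
	case i < end:
		return "goroutine stack"
	}
	return "bad index"
}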
   255  
   256  // markrootBlock scans the shard'th shard of the block of memory [b0,
   257  // b0+n0), with the given pointer mask.
   258  //
   259  //go:nowritebarrier
   260  func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
   261  	if rootBlockBytes%(8*sys.PtrSize) != 0 {
   262  		// This is necessary to pick byte offsets in ptrmask0.
   263  		throw("rootBlockBytes must be a multiple of 8*ptrSize")
   264  	}
   265  
   266  	// Note that if b0 is toward the end of the address space,
   267  	// then b0 + rootBlockBytes might wrap around.
   268  	// These tests are written to avoid any possible overflow.
   269  	off := uintptr(shard) * rootBlockBytes
   270  	if off >= n0 {
   271  		return
   272  	}
   273  	b := b0 + off
   274  	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
   275  	n := uintptr(rootBlockBytes)
   276  	if off+n > n0 {
   277  		n = n0 - off
   278  	}
   279  
   280  	// Scan this shard.
   281  	scanblock(b, n, ptrmask, gcw, nil)
   282  }
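
The shard arithmetic is written so that the offset is computed and bounds-checked before any address is formed, avoiding wraparound when b0 is near the top of the address space. A minimal sketch of just the bounds logic:

func shardBoundsSketch(n0 uintptr, shard int) (off, n uintptr, ok bool) {
	const rootBlockBytes = 256 << 10
	off = uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return 0, 0, false // shard lies entirely past the block
	}
	n = rootBlockBytes
	if off+n > n0 {
		n = n0 - off // final, partial shard
	}
	return off, n, true
}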
   283  
   284  // markrootFreeGStacks frees stacks of dead Gs.
   285  //
   286  // This does not free stacks of dead Gs cached on Ps, but having a few
   287  // cached stacks around isn't a problem.
   288  func markrootFreeGStacks() {
   289  	// Take list of dead Gs with stacks.
   290  	lock(&sched.gFree.lock)
   291  	list := sched.gFree.stack
   292  	sched.gFree.stack = gList{}
   293  	unlock(&sched.gFree.lock)
   294  	if list.empty() {
   295  		return
   296  	}
   297  
   298  	// Free stacks.
   299  	q := gQueue{list.head, list.head}
   300  	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
   301  		stackfree(gp.stack)
   302  		gp.stack.lo = 0
   303  		gp.stack.hi = 0
   304  		// Manipulate the queue directly since the Gs are
   305  		// already all linked the right way.
   306  		q.tail.set(gp)
   307  	}
   308  
   309  	// Put Gs back on the free list.
   310  	lock(&sched.gFree.lock)
   311  	sched.gFree.noStack.pushAll(q)
   312  	unlock(&sched.gFree.lock)
   313  }
   314  
   315  // markrootSpans marks roots for one shard of markArenas.
   316  //
   317  //go:nowritebarrier
   318  func markrootSpans(gcw *gcWork, shard int) {
   319  	if !go115NewMarkrootSpans {
   320  		oldMarkrootSpans(gcw, shard)
   321  		return
   322  	}
   323  	// Objects with finalizers have two GC-related invariants:
   324  	//
   325  	// 1) Everything reachable from the object must be marked.
   326  	// This ensures that when we pass the object to its finalizer,
   327  	// everything the finalizer can reach will be retained.
   328  	//
   329  	// 2) Finalizer specials (which are not in the garbage
   330  	// collected heap) are roots. In practice, this means the fn
   331  	// field must be scanned.
   332  	sg := mheap_.sweepgen
   333  
   334  	// Find the arena and page index into that arena for this shard.
   335  	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
   336  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   337  	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)
   338  
   339  	// Construct slice of bitmap which we'll iterate over.
   340  	specialsbits := ha.pageSpecials[arenaPage/8:]
   341  	specialsbits = specialsbits[:pagesPerSpanRoot/8]
   342  	for i := range specialsbits {
   343  		// Find set bits, which correspond to spans with specials.
   344  		specials := atomic.Load8(&specialsbits[i])
   345  		if specials == 0 {
   346  			continue
   347  		}
   348  		for j := uint(0); j < 8; j++ {
   349  			if specials&(1<<j) == 0 {
   350  				continue
   351  			}
   352  			// Find the span for this bit.
   353  			//
   354  			// This value is guaranteed to be non-nil because having
   355  			// specials implies that the span is in-use, and since we're
   356  			// currently marking we can be sure that we don't have to worry
   357  			// about the span being freed and re-used.
   358  			s := ha.spans[arenaPage+uint(i)*8+j]
   359  
   360  			// The state must be mSpanInUse if the specials bit is set, so
   361  			// sanity check that.
   362  			if state := s.state.get(); state != mSpanInUse {
   363  				print("s.state = ", state, "\n")
   364  				throw("non in-use span found with specials bit set")
   365  			}
   366  			// Check that this span was swept (it may be cached or uncached).
   367  			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
   368  				// sweepgen was updated (+2) during non-checkmark GC pass
   369  				print("sweep ", s.sweepgen, " ", sg, "\n")
   370  				throw("gc: unswept span")
   371  			}
   372  
   373  			// Lock the specials to prevent a special from being
   374  			// removed from the list while we're traversing it.
   375  			lock(&s.speciallock)
   376  			for sp := s.specials; sp != nil; sp = sp.next {
   377  				if sp.kind != _KindSpecialFinalizer {
   378  					continue
   379  				}
   380  				// don't mark finalized object, but scan it so we
   381  				// retain everything it points to.
   382  				spf := (*specialfinalizer)(unsafe.Pointer(sp))
   383  				// A finalizer can be set for an inner byte of an object, find object beginning.
   384  				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
   385  
   386  				// Mark everything that can be reached from
   387  				// the object (but *not* the object itself or
   388  				// we'll never collect it).
   389  				scanobject(p, gcw)
   390  
   391  				// The special itself is a root.
   392  				scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
   393  			}
   394  			unlock(&s.speciallock)
   395  		}
   396  	}
   397  }
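
The inner loops above are a standard two-level bitmap walk: skip whole zero bytes, then test individual bits. A standalone sketch of that shape, with a callback standing in for the per-span specials work:

func specialsBitsSketch(bits []uint8, visit func(pageIndex uint)) {
	for i, b := range bits {
		if b == 0 {
			continue // no specials in this run of 8 pages
		}
		for j := uint(0); j < 8; j++ {
			if b&(1<<j) != 0 {
				visit(uint(i)*8 + j) // page whose span has specials
			}
		}
	}
}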
   398  
   399  // oldMarkrootSpans marks roots for one shard of work.spans.
   400  //
   401  // For go115NewMarkrootSpans = false.
   402  //
   403  //go:nowritebarrier
   404  func oldMarkrootSpans(gcw *gcWork, shard int) {
   405  	// Objects with finalizers have two GC-related invariants:
   406  	//
   407  	// 1) Everything reachable from the object must be marked.
   408  	// This ensures that when we pass the object to its finalizer,
   409  	// everything the finalizer can reach will be retained.
   410  	//
   411  	// 2) Finalizer specials (which are not in the garbage
   412  	// collected heap) are roots. In practice, this means the fn
   413  	// field must be scanned.
   414  	//
   415  	// TODO(austin): There are several ideas for making this more
   416  	// efficient in issue #11485.
   417  
   418  	sg := mheap_.sweepgen
   419  	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
   420  	// Note that work.spans may not include spans that were
   421  	// allocated between entering the scan phase and now. We may
   422  	// also race with spans being added into sweepSpans when they're
   423  	// just created, and as a result we may see nil pointers in the
   424  	// spans slice. This is okay because any objects with finalizers
   425  	// in those spans must have been allocated and given finalizers
   426  	// after we entered the scan phase, so addfinalizer will have
   427  	// ensured the above invariants for them.
   428  	for i := 0; i < len(spans); i++ {
   429  		// sweepBuf.block requires that we read pointers from the block atomically.
   430  		// It also requires that we ignore nil pointers.
   431  		s := (*mspan)(atomic.Loadp(unsafe.Pointer(&spans[i])))
   432  
   433  		// This is racing with spans being initialized, so
   434  		// check the state carefully.
   435  		if s == nil || s.state.get() != mSpanInUse {
   436  			continue
   437  		}
   438  		// Check that this span was swept (it may be cached or uncached).
   439  		if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
   440  			// sweepgen was updated (+2) during non-checkmark GC pass
   441  			print("sweep ", s.sweepgen, " ", sg, "\n")
   442  			throw("gc: unswept span")
   443  		}
   444  
   445  		// Speculatively check if there are any specials
   446  		// without acquiring the span lock. This may race with
   447  		// adding the first special to a span, but in that
   448  		// case addfinalizer will observe that the GC is
   449  		// active (which is globally synchronized) and ensure
   450  		// the above invariants. We may also ensure the
   451  		// invariants, but it's okay to scan an object twice.
   452  		if s.specials == nil {
   453  			continue
   454  		}
   455  
   456  		// Lock the specials to prevent a special from being
   457  		// removed from the list while we're traversing it.
   458  		lock(&s.speciallock)
   459  
   460  		for sp := s.specials; sp != nil; sp = sp.next {
   461  			if sp.kind != _KindSpecialFinalizer {
   462  				continue
   463  			}
   464  			// don't mark finalized object, but scan it so we
   465  			// retain everything it points to.
   466  			spf := (*specialfinalizer)(unsafe.Pointer(sp))
   467  			// A finalizer can be set for an inner byte of an object, find object beginning.
   468  			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
   469  
   470  			// Mark everything that can be reached from
   471  			// the object (but *not* the object itself or
   472  			// we'll never collect it).
   473  			scanobject(p, gcw)
   474  
   475  			// The special itself is a root.
   476  			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
   477  		}
   478  
   479  		unlock(&s.speciallock)
   480  	}
   481  }
   482  
   483  // gcAssistAlloc performs GC work to make gp's assist debt positive.
    484  // gp must be the calling user goroutine.
   485  //
   486  // This must be called with preemption enabled.
   487  func gcAssistAlloc(gp *g) {
   488  	// Don't assist in non-preemptible contexts. These are
   489  	// generally fragile and won't allow the assist to block.
   490  	if getg() == gp.m.g0 {
   491  		return
   492  	}
   493  	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
   494  		return
   495  	}
   496  
   497  	traced := false
   498  retry:
   499  	// Compute the amount of scan work we need to do to make the
   500  	// balance positive. When the required amount of work is low,
   501  	// we over-assist to build up credit for future allocations
   502  	// and amortize the cost of assisting.
   503  	debtBytes := -gp.gcAssistBytes
   504  	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
   505  	if scanWork < gcOverAssistWork {
   506  		scanWork = gcOverAssistWork
   507  		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
   508  	}
   509  
   510  	// Steal as much credit as we can from the background GC's
   511  	// scan credit. This is racy and may drop the background
   512  	// credit below 0 if two mutators steal at the same time. This
   513  	// will just cause steals to fail until credit is accumulated
   514  	// again, so in the long run it doesn't really matter, but we
   515  	// do have to handle the negative credit case.
   516  	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
   517  	stolen := int64(0)
   518  	if bgScanCredit > 0 {
   519  		if bgScanCredit < scanWork {
   520  			stolen = bgScanCredit
   521  			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
   522  		} else {
   523  			stolen = scanWork
   524  			gp.gcAssistBytes += debtBytes
   525  		}
   526  		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
   527  
   528  		scanWork -= stolen
   529  
   530  		if scanWork == 0 {
   531  			// We were able to steal all of the credit we
   532  			// needed.
   533  			if traced {
   534  				traceGCMarkAssistDone()
   535  			}
   536  			return
   537  		}
   538  	}
   539  
   540  	if trace.enabled && !traced {
   541  		traced = true
   542  		traceGCMarkAssistStart()
   543  	}
   544  
   545  	// Perform assist work
   546  	systemstack(func() {
   547  		gcAssistAlloc1(gp, scanWork)
   548  		// The user stack may have moved, so this can't touch
   549  		// anything on it until it returns from systemstack.
   550  	})
   551  
   552  	completed := gp.param != nil
   553  	gp.param = nil
   554  	if completed {
   555  		gcMarkDone()
   556  	}
   557  
   558  	if gp.gcAssistBytes < 0 {
    559  		// We were unable to steal enough credit or perform
   560  		// enough work to pay off the assist debt. We need to
   561  		// do one of these before letting the mutator allocate
   562  		// more to prevent over-allocation.
   563  		//
   564  		// If this is because we were preempted, reschedule
   565  		// and try some more.
   566  		if gp.preempt {
   567  			Gosched()
   568  			goto retry
   569  		}
   570  
   571  		// Add this G to an assist queue and park. When the GC
   572  		// has more background credit, it will satisfy queued
   573  		// assists before flushing to the global credit pool.
   574  		//
   575  		// Note that this does *not* get woken up when more
   576  		// work is added to the work list. The theory is that
   577  		// there wasn't enough work to do anyway, so we might
   578  		// as well let background marking take care of the
   579  		// work that is available.
   580  		if !gcParkAssist() {
   581  			goto retry
   582  		}
   583  
   584  		// At this point either background GC has satisfied
   585  		// this G's assist debt, or the GC cycle is over.
   586  	}
   587  	if traced {
   588  		traceGCMarkAssistDone()
   589  	}
   590  }
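
The debt-to-work conversion at the top of the retry loop can be read in isolation. A hedged sketch, with the gcController fields passed in as plain parameters (the helper itself is illustrative, not runtime API):

func assistWorkSketch(debtBytes int64, workPerByte, bytesPerWork float64, overAssist int64) (scanWork, chargedBytes int64) {
	scanWork = int64(workPerByte * float64(debtBytes))
	chargedBytes = debtBytes
	if scanWork < overAssist {
		// Over-assist: do extra work now and bank the surplus as
		// credit for future allocations.
		scanWork = overAssist
		chargedBytes = int64(bytesPerWork * float64(scanWork))
	}
	return
}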
   591  
   592  // gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
   593  // stack. This is a separate function to make it easier to see that
   594  // we're not capturing anything from the user stack, since the user
   595  // stack may move while we're in this function.
   596  //
   597  // gcAssistAlloc1 indicates whether this assist completed the mark
   598  // phase by setting gp.param to non-nil. This can't be communicated on
   599  // the stack since it may move.
   600  //
   601  //go:systemstack
   602  func gcAssistAlloc1(gp *g, scanWork int64) {
   603  	// Clear the flag indicating that this assist completed the
   604  	// mark phase.
   605  	gp.param = nil
   606  
   607  	if atomic.Load(&gcBlackenEnabled) == 0 {
   608  		// The gcBlackenEnabled check in malloc races with the
   609  		// store that clears it but an atomic check in every malloc
   610  		// would be a performance hit.
   611  		// Instead we recheck it here on the non-preemptable system
   612  		// stack to determine if we should perform an assist.
   613  
   614  		// GC is done, so ignore any remaining debt.
   615  		gp.gcAssistBytes = 0
   616  		return
   617  	}
   618  	// Track time spent in this assist. Since we're on the
   619  	// system stack, this is non-preemptible, so we can
   620  	// just measure start and end time.
   621  	startTime := nanotime()
   622  
   623  	decnwait := atomic.Xadd(&work.nwait, -1)
   624  	if decnwait == work.nproc {
   625  		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
   626  		throw("nwait > work.nprocs")
   627  	}
   628  
   629  	// gcDrainN requires the caller to be preemptible.
   630  	casgstatus(gp, _Grunning, _Gwaiting)
   631  	gp.waitreason = waitReasonGCAssistMarking
   632  
   633  	// drain own cached work first in the hopes that it
   634  	// will be more cache friendly.
   635  	gcw := &getg().m.p.ptr().gcw
   636  	workDone := gcDrainN(gcw, scanWork)
   637  
   638  	casgstatus(gp, _Gwaiting, _Grunning)
   639  
   640  	// Record that we did this much scan work.
   641  	//
   642  	// Back out the number of bytes of assist credit that
   643  	// this scan work counts for. The "1+" is a poor man's
   644  	// round-up, to ensure this adds credit even if
   645  	// assistBytesPerWork is very low.
   646  	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))
   647  
   648  	// If this is the last worker and we ran out of work,
   649  	// signal a completion point.
   650  	incnwait := atomic.Xadd(&work.nwait, +1)
   651  	if incnwait > work.nproc {
   652  		println("runtime: work.nwait=", incnwait,
   653  			"work.nproc=", work.nproc)
   654  		throw("work.nwait > work.nproc")
   655  	}
   656  
   657  	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
   658  		// This has reached a background completion point. Set
   659  		// gp.param to a non-nil value to indicate this. It
   660  		// doesn't matter what we set it to (it just has to be
   661  		// a valid pointer).
   662  		gp.param = unsafe.Pointer(gp)
   663  	}
   664  	duration := nanotime() - startTime
   665  	_p_ := gp.m.p.ptr()
   666  	_p_.gcAssistTime += duration
   667  	if _p_.gcAssistTime > gcAssistTimeSlack {
   668  		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
   669  		_p_.gcAssistTime = 0
   670  	}
   671  }
   672  
   673  // gcWakeAllAssists wakes all currently blocked assists. This is used
   674  // at the end of a GC cycle. gcBlackenEnabled must be false to prevent
   675  // new assists from going to sleep after this point.
   676  func gcWakeAllAssists() {
   677  	lock(&work.assistQueue.lock)
   678  	list := work.assistQueue.q.popList()
   679  	injectglist(&list)
   680  	unlock(&work.assistQueue.lock)
   681  }
   682  
   683  // gcParkAssist puts the current goroutine on the assist queue and parks.
   684  //
   685  // gcParkAssist reports whether the assist is now satisfied. If it
   686  // returns false, the caller must retry the assist.
   687  //
   688  //go:nowritebarrier
   689  func gcParkAssist() bool {
   690  	lock(&work.assistQueue.lock)
   691  	// If the GC cycle finished while we were getting the lock,
   692  	// exit the assist. The cycle can't finish while we hold the
   693  	// lock.
   694  	if atomic.Load(&gcBlackenEnabled) == 0 {
   695  		unlock(&work.assistQueue.lock)
   696  		return true
   697  	}
   698  
   699  	gp := getg()
   700  	oldList := work.assistQueue.q
   701  	work.assistQueue.q.pushBack(gp)
   702  
   703  	// Recheck for background credit now that this G is in
   704  	// the queue, but can still back out. This avoids a
   705  	// race in case background marking has flushed more
   706  	// credit since we checked above.
   707  	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
   708  		work.assistQueue.q = oldList
   709  		if oldList.tail != 0 {
   710  			oldList.tail.ptr().schedlink.set(nil)
   711  		}
   712  		unlock(&work.assistQueue.lock)
   713  		return false
   714  	}
   715  	// Park.
   716  	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
   717  	return true
   718  }
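
gcParkAssist is an instance of the enqueue-then-recheck pattern: the G adds itself to the queue, then re-tests for credit while still holding the lock, so a flush that raced with the earlier unlocked check cannot strand it. A standalone sketch using plain sync primitives in place of the runtime's lock and gQueue (assumes the "sync" and "sync/atomic" imports):

func parkAssistSketch(mu *sync.Mutex, queue *[]int, g int, bgCredit *int64) bool {
	mu.Lock()
	defer mu.Unlock()
	*queue = append(*queue, g)
	if atomic.LoadInt64(bgCredit) > 0 {
		// Credit arrived since the unlocked check: back out and retry.
		*queue = (*queue)[:len(*queue)-1]
		return false
	}
	// Here the runtime parks the G and releases the lock atomically
	// (goparkunlock); the deferred Unlock stands in for that.
	return true
}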
   719  
   720  // gcFlushBgCredit flushes scanWork units of background scan work
   721  // credit. This first satisfies blocked assists on the
   722  // work.assistQueue and then flushes any remaining credit to
   723  // gcController.bgScanCredit.
   724  //
   725  // Write barriers are disallowed because this is used by gcDrain after
   726  // it has ensured that all work is drained and this must preserve that
   727  // condition.
   728  //
   729  //go:nowritebarrierrec
   730  func gcFlushBgCredit(scanWork int64) {
   731  	if work.assistQueue.q.empty() {
   732  		// Fast path; there are no blocked assists. There's a
   733  		// small window here where an assist may add itself to
   734  		// the blocked queue and park. If that happens, we'll
   735  		// just get it on the next flush.
   736  		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
   737  		return
   738  	}
   739  
   740  	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)
   741  
   742  	lock(&work.assistQueue.lock)
   743  	for !work.assistQueue.q.empty() && scanBytes > 0 {
   744  		gp := work.assistQueue.q.pop()
   745  		// Note that gp.gcAssistBytes is negative because gp
   746  		// is in debt. Think carefully about the signs below.
   747  		if scanBytes+gp.gcAssistBytes >= 0 {
   748  			// Satisfy this entire assist debt.
   749  			scanBytes += gp.gcAssistBytes
   750  			gp.gcAssistBytes = 0
   751  			// It's important that we *not* put gp in
   752  			// runnext. Otherwise, it's possible for user
   753  			// code to exploit the GC worker's high
   754  			// scheduler priority to get itself always run
   755  			// before other goroutines and always in the
   756  			// fresh quantum started by GC.
   757  			ready(gp, 0, false)
   758  		} else {
   759  			// Partially satisfy this assist.
   760  			gp.gcAssistBytes += scanBytes
   761  			scanBytes = 0
   762  			// As a heuristic, we move this assist to the
   763  			// back of the queue so that large assists
   764  			// can't clog up the assist queue and
   765  			// substantially delay small assists.
   766  			work.assistQueue.q.pushBack(gp)
   767  			break
   768  		}
   769  	}
   770  
   771  	if scanBytes > 0 {
   772  		// Convert from scan bytes back to work.
   773  		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
   774  		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
   775  	}
   776  	unlock(&work.assistQueue.lock)
   777  }
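
The signs in the loop above are easy to get backwards: assist debts are negative, so scanBytes+gp.gcAssistBytes >= 0 means the flush can pay the debt off in full. A self-contained sketch of the same accounting over a plain slice of debts:

func flushCreditSketch(scanBytes int64, debts []int64) (leftover int64, satisfied int) {
	for len(debts) > 0 && scanBytes > 0 {
		d := debts[0] // negative: outstanding debt in bytes
		if scanBytes+d >= 0 {
			scanBytes += d // fully satisfied; this G becomes runnable
			debts = debts[1:]
			satisfied++
		} else {
			debts[0] = d + scanBytes // partial payment; G stays queued
			scanBytes = 0
		}
	}
	return scanBytes, satisfied
}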
   778  
   779  // scanstack scans gp's stack, greying all pointers found on the stack.
   780  //
   781  // scanstack will also shrink the stack if it is safe to do so. If it
   782  // is not, it schedules a stack shrink for the next synchronous safe
   783  // point.
   784  //
   785  // scanstack is marked go:systemstack because it must not be preempted
   786  // while using a workbuf.
   787  //
   788  //go:nowritebarrier
   789  //go:systemstack
   790  func scanstack(gp *g, gcw *gcWork) {
   791  	if readgstatus(gp)&_Gscan == 0 {
   792  		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
   793  		throw("scanstack - bad status")
   794  	}
   795  
   796  	switch readgstatus(gp) &^ _Gscan {
   797  	default:
   798  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   799  		throw("mark - bad status")
   800  	case _Gdead:
   801  		return
   802  	case _Grunning:
   803  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   804  		throw("scanstack: goroutine not stopped")
   805  	case _Grunnable, _Gsyscall, _Gwaiting:
   806  		// ok
   807  	}
   808  
   809  	if gp == getg() {
   810  		throw("can't scan our own stack")
   811  	}
   812  
   813  	if isShrinkStackSafe(gp) {
   814  		// Shrink the stack if not much of it is being used.
   815  		shrinkstack(gp)
   816  	} else {
   817  		// Otherwise, shrink the stack at the next sync safe point.
   818  		gp.preemptShrink = true
   819  	}
   820  
   821  	var state stackScanState
   822  	state.stack = gp.stack
   823  
   824  	if stackTraceDebug {
   825  		println("stack trace goroutine", gp.goid)
   826  	}
   827  
   828  	if debugScanConservative && gp.asyncSafePoint {
   829  		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
   830  	}
   831  
   832  	// Scan the saved context register. This is effectively a live
   833  	// register that gets moved back and forth between the
   834  	// register and sched.ctxt without a write barrier.
   835  	if gp.sched.ctxt != nil {
   836  		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
   837  	}
   838  
   839  	// Scan the stack. Accumulate a list of stack objects.
   840  	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
   841  		scanframeworker(frame, &state, gcw)
   842  		return true
   843  	}
   844  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
   845  
   846  	// Find additional pointers that point into the stack from the heap.
   847  	// Currently this includes defers and panics. See also function copystack.
   848  
   849  	// Find and trace all defer arguments.
   850  	tracebackdefers(gp, scanframe, nil)
   851  
   852  	// Find and trace other pointers in defer records.
   853  	for d := gp._defer; d != nil; d = d.link {
   854  		if d.fn != nil {
   855  			// tracebackdefers above does not scan the func value, which could
   856  			// be a stack allocated closure. See issue 30453.
   857  			scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
   858  		}
   859  		if d.link != nil {
   860  			// The link field of a stack-allocated defer record might point
   861  			// to a heap-allocated defer record. Keep that heap record live.
   862  			scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
   863  		}
   864  		// Retain defers records themselves.
   865  		// Defer records might not be reachable from the G through regular heap
   866  		// tracing because the defer linked list might weave between the stack and the heap.
   867  		if d.heap {
   868  			scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
   869  		}
   870  	}
   871  	if gp._panic != nil {
   872  		// Panics are always stack allocated.
   873  		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
   874  	}
   875  
   876  	// Find and scan all reachable stack objects.
   877  	//
   878  	// The state's pointer queue prioritizes precise pointers over
   879  	// conservative pointers so that we'll prefer scanning stack
   880  	// objects precisely.
   881  	state.buildIndex()
   882  	for {
   883  		p, conservative := state.getPtr()
   884  		if p == 0 {
   885  			break
   886  		}
   887  		obj := state.findObject(p)
   888  		if obj == nil {
   889  			continue
   890  		}
   891  		t := obj.typ
   892  		if t == nil {
   893  			// We've already scanned this object.
   894  			continue
   895  		}
   896  		obj.setType(nil) // Don't scan it again.
   897  		if stackTraceDebug {
   898  			printlock()
    899  			print("  live stkobj at", hex(state.stack.lo+uintptr(obj.off)), " of type ", t.string())
   900  			if conservative {
   901  				print(" (conservative)")
   902  			}
   903  			println()
   904  			printunlock()
   905  		}
   906  		gcdata := t.gcdata
   907  		var s *mspan
   908  		if t.kind&kindGCProg != 0 {
   909  			// This path is pretty unlikely, an object large enough
   910  			// to have a GC program allocated on the stack.
   911  			// We need some space to unpack the program into a straight
   912  			// bitmask, which we allocate/free here.
   913  			// TODO: it would be nice if there were a way to run a GC
   914  			// program without having to store all its bits. We'd have
   915  			// to change from a Lempel-Ziv style program to something else.
   916  			// Or we can forbid putting objects on stacks if they require
   917  			// a gc program (see issue 27447).
   918  			s = materializeGCProg(t.ptrdata, gcdata)
   919  			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   920  		}
   921  
   922  		b := state.stack.lo + uintptr(obj.off)
   923  		if conservative {
   924  			scanConservative(b, t.ptrdata, gcdata, gcw, &state)
   925  		} else {
   926  			scanblock(b, t.ptrdata, gcdata, gcw, &state)
   927  		}
   928  
   929  		if s != nil {
   930  			dematerializeGCProg(s)
   931  		}
   932  	}
   933  
   934  	// Deallocate object buffers.
   935  	// (Pointer buffers were all deallocated in the loop above.)
   936  	for state.head != nil {
   937  		x := state.head
   938  		state.head = x.next
   939  		if stackTraceDebug {
   940  			for _, obj := range x.obj[:x.nobj] {
   941  				if obj.typ == nil { // reachable
   942  					continue
   943  				}
   944  				println("  dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string())
   945  				// Note: not necessarily really dead - only reachable-from-ptr dead.
   946  			}
   947  		}
   948  		x.nobj = 0
   949  		putempty((*workbuf)(unsafe.Pointer(x)))
   950  	}
   951  	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
   952  		throw("remaining pointer buffers")
   953  	}
   954  }
   955  
   956  // Scan a stack frame: local variables and function arguments/results.
   957  //go:nowritebarrier
   958  func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
   959  	if _DebugGC > 1 && frame.continpc != 0 {
   960  		print("scanframe ", funcname(frame.fn), "\n")
   961  	}
   962  
   963  	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
   964  	isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV1
   965  	if state.conservative || isAsyncPreempt || isDebugCall {
   966  		if debugScanConservative {
   967  			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
   968  		}
   969  
   970  		// Conservatively scan the frame. Unlike the precise
   971  		// case, this includes the outgoing argument space
   972  		// since we may have stopped while this function was
   973  		// setting up a call.
   974  		//
   975  		// TODO: We could narrow this down if the compiler
   976  		// produced a single map per function of stack slots
   977  		// and registers that ever contain a pointer.
   978  		if frame.varp != 0 {
   979  			size := frame.varp - frame.sp
   980  			if size > 0 {
   981  				scanConservative(frame.sp, size, nil, gcw, state)
   982  			}
   983  		}
   984  
   985  		// Scan arguments to this frame.
   986  		if frame.arglen != 0 {
   987  			// TODO: We could pass the entry argument map
   988  			// to narrow this down further.
   989  			scanConservative(frame.argp, frame.arglen, nil, gcw, state)
   990  		}
   991  
   992  		if isAsyncPreempt || isDebugCall {
   993  			// This function's frame contained the
   994  			// registers for the asynchronously stopped
   995  			// parent frame. Scan the parent
   996  			// conservatively.
   997  			state.conservative = true
   998  		} else {
   999  			// We only wanted to scan those two frames
  1000  			// conservatively. Clear the flag for future
  1001  			// frames.
  1002  			state.conservative = false
  1003  		}
  1004  		return
  1005  	}
  1006  
  1007  	locals, args, objs := getStackMap(frame, &state.cache, false)
  1008  
  1009  	// Scan local variables if stack frame has been allocated.
  1010  	if locals.n > 0 {
  1011  		size := uintptr(locals.n) * sys.PtrSize
  1012  		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
  1013  	}
  1014  
  1015  	// Scan arguments.
  1016  	if args.n > 0 {
  1017  		scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
  1018  	}
  1019  
  1020  	// Add all stack objects to the stack object list.
  1021  	if frame.varp != 0 {
  1022  		// varp is 0 for defers, where there are no locals.
  1023  		// In that case, there can't be a pointer to its args, either.
  1024  		// (And all args would be scanned above anyway.)
  1025  		for _, obj := range objs {
  1026  			off := obj.off
  1027  			base := frame.varp // locals base pointer
  1028  			if off >= 0 {
  1029  				base = frame.argp // arguments and return values base pointer
  1030  			}
  1031  			ptr := base + uintptr(off)
  1032  			if ptr < frame.sp {
  1033  				// object hasn't been allocated in the frame yet.
  1034  				continue
  1035  			}
  1036  			if stackTraceDebug {
  1037  				println("stkobj at", hex(ptr), "of type", obj.typ.string())
  1038  			}
  1039  			state.addObject(ptr, obj.typ)
  1040  		}
  1041  	}
  1042  }
  1043  
  1044  type gcDrainFlags int
  1045  
  1046  const (
  1047  	gcDrainUntilPreempt gcDrainFlags = 1 << iota
  1048  	gcDrainFlushBgCredit
  1049  	gcDrainIdle
  1050  	gcDrainFractional
  1051  )
  1052  
  1053  // gcDrain scans roots and objects in work buffers, blackening grey
  1054  // objects until it is unable to get more work. It may return before
  1055  // GC is done; it's the caller's responsibility to balance work from
  1056  // other Ps.
  1057  //
  1058  // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
  1059  // is set.
  1060  //
  1061  // If flags&gcDrainIdle != 0, gcDrain returns when there is other work
  1062  // to do.
  1063  //
  1064  // If flags&gcDrainFractional != 0, gcDrain self-preempts when
  1065  // pollFractionalWorkerExit() returns true. This implies
  1066  // gcDrainNoBlock.
  1067  //
  1068  // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
  1069  // credit to gcController.bgScanCredit every gcCreditSlack units of
  1070  // scan work.
  1071  //
  1072  // gcDrain will always return if there is a pending STW.
  1073  //
  1074  //go:nowritebarrier
  1075  func gcDrain(gcw *gcWork, flags gcDrainFlags) {
  1076  	if !writeBarrier.needed {
  1077  		throw("gcDrain phase incorrect")
  1078  	}
  1079  
  1080  	gp := getg().m.curg
  1081  	preemptible := flags&gcDrainUntilPreempt != 0
  1082  	flushBgCredit := flags&gcDrainFlushBgCredit != 0
  1083  	idle := flags&gcDrainIdle != 0
  1084  
  1085  	initScanWork := gcw.scanWork
  1086  
  1087  	// checkWork is the scan work before performing the next
  1088  	// self-preempt check.
  1089  	checkWork := int64(1<<63 - 1)
  1090  	var check func() bool
  1091  	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
  1092  		checkWork = initScanWork + drainCheckThreshold
  1093  		if idle {
  1094  			check = pollWork
  1095  		} else if flags&gcDrainFractional != 0 {
  1096  			check = pollFractionalWorkerExit
  1097  		}
  1098  	}
  1099  
  1100  	// Drain root marking jobs.
  1101  	if work.markrootNext < work.markrootJobs {
  1102  		// Stop if we're preemptible or if someone wants to STW.
  1103  		for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
  1104  			job := atomic.Xadd(&work.markrootNext, +1) - 1
  1105  			if job >= work.markrootJobs {
  1106  				break
  1107  			}
  1108  			markroot(gcw, job)
  1109  			if check != nil && check() {
  1110  				goto done
  1111  			}
  1112  		}
  1113  	}
  1114  
  1115  	// Drain heap marking jobs.
  1116  	// Stop if we're preemptible or if someone wants to STW.
  1117  	for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
  1118  		// Try to keep work available on the global queue. We used to
  1119  		// check if there were waiting workers, but it's better to
  1120  		// just keep work available than to make workers wait. In the
  1121  		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
  1122  		// balances.
  1123  		if work.full == 0 {
  1124  			gcw.balance()
  1125  		}
  1126  
  1127  		b := gcw.tryGetFast()
  1128  		if b == 0 {
  1129  			b = gcw.tryGet()
  1130  			if b == 0 {
  1131  				// Flush the write barrier
  1132  				// buffer; this may create
  1133  				// more work.
  1134  				wbBufFlush(nil, 0)
  1135  				b = gcw.tryGet()
  1136  			}
  1137  		}
  1138  		if b == 0 {
  1139  			// Unable to get work.
  1140  			break
  1141  		}
  1142  		scanobject(b, gcw)
  1143  
  1144  		// Flush background scan work credit to the global
  1145  		// account if we've accumulated enough locally so
  1146  		// mutator assists can draw on it.
  1147  		if gcw.scanWork >= gcCreditSlack {
  1148  			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1149  			if flushBgCredit {
  1150  				gcFlushBgCredit(gcw.scanWork - initScanWork)
  1151  				initScanWork = 0
  1152  			}
  1153  			checkWork -= gcw.scanWork
  1154  			gcw.scanWork = 0
  1155  
  1156  			if checkWork <= 0 {
  1157  				checkWork += drainCheckThreshold
  1158  				if check != nil && check() {
  1159  					break
  1160  				}
  1161  			}
  1162  		}
  1163  	}
  1164  
  1165  done:
  1166  	// Flush remaining scan work credit.
  1167  	if gcw.scanWork > 0 {
  1168  		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1169  		if flushBgCredit {
  1170  			gcFlushBgCredit(gcw.scanWork - initScanWork)
  1171  		}
  1172  		gcw.scanWork = 0
  1173  	}
  1174  }
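
The checkWork bookkeeping amortizes the (possibly syscall-heavy) check function over drainCheckThreshold units of scan work. A minimal sketch of that cadence, with the work stream abstracted to a slice of deltas:

func drainCheckSketch(workDeltas []int64, check func() bool) (done int64) {
	const drainCheckThreshold = 100000
	checkWork := int64(drainCheckThreshold)
	for _, w := range workDeltas {
		done += w
		checkWork -= w
		if checkWork <= 0 {
			checkWork += drainCheckThreshold
			if check != nil && check() {
				return // self-preempt between whole units of work
			}
		}
	}
	return
}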
  1175  
  1176  // gcDrainN blackens grey objects until it has performed roughly
  1177  // scanWork units of scan work or the G is preempted. This is
  1178  // best-effort, so it may perform less work if it fails to get a work
  1179  // buffer. Otherwise, it will perform at least n units of work, but
  1180  // may perform more because scanning is always done in whole object
  1181  // increments. It returns the amount of scan work performed.
  1182  //
  1183  // The caller goroutine must be in a preemptible state (e.g.,
  1184  // _Gwaiting) to prevent deadlocks during stack scanning. As a
  1185  // consequence, this must be called on the system stack.
  1186  //
  1187  //go:nowritebarrier
  1188  //go:systemstack
  1189  func gcDrainN(gcw *gcWork, scanWork int64) int64 {
  1190  	if !writeBarrier.needed {
  1191  		throw("gcDrainN phase incorrect")
  1192  	}
  1193  
  1194  	// There may already be scan work on the gcw, which we don't
  1195  	// want to claim was done by this call.
  1196  	workFlushed := -gcw.scanWork
  1197  
  1198  	gp := getg().m.curg
  1199  	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
  1200  		// See gcDrain comment.
  1201  		if work.full == 0 {
  1202  			gcw.balance()
  1203  		}
  1204  
  1205  		// This might be a good place to add prefetch code...
  1206  		// if(wbuf.nobj > 4) {
   1207  		//         PREFETCH(wbuf->obj[wbuf.nobj - 3]);
  1208  		//  }
  1209  		//
  1210  		b := gcw.tryGetFast()
  1211  		if b == 0 {
  1212  			b = gcw.tryGet()
  1213  			if b == 0 {
  1214  				// Flush the write barrier buffer;
  1215  				// this may create more work.
  1216  				wbBufFlush(nil, 0)
  1217  				b = gcw.tryGet()
  1218  			}
  1219  		}
  1220  
  1221  		if b == 0 {
  1222  			// Try to do a root job.
  1223  			//
  1224  			// TODO: Assists should get credit for this
  1225  			// work.
  1226  			if work.markrootNext < work.markrootJobs {
  1227  				job := atomic.Xadd(&work.markrootNext, +1) - 1
  1228  				if job < work.markrootJobs {
  1229  					markroot(gcw, job)
  1230  					continue
  1231  				}
  1232  			}
  1233  			// No heap or root jobs.
  1234  			break
  1235  		}
  1236  		scanobject(b, gcw)
  1237  
  1238  		// Flush background scan work credit.
  1239  		if gcw.scanWork >= gcCreditSlack {
  1240  			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1241  			workFlushed += gcw.scanWork
  1242  			gcw.scanWork = 0
  1243  		}
  1244  	}
  1245  
  1246  	// Unlike gcDrain, there's no need to flush remaining work
  1247  	// here because this never flushes to bgScanCredit and
  1248  	// gcw.dispose will flush any remaining work to scanWork.
  1249  
  1250  	return workFlushed + gcw.scanWork
  1251  }
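
The workFlushed accounting starts at -gcw.scanWork so that scan work already sitting on the gcWork is never attributed to this call. A tiny sketch of why that nets out correctly:

func netWorkSketch(pre int64, deltas []int64) int64 {
	flushed := -pre // cancel out pre-existing local work
	local := pre
	for _, d := range deltas {
		local += d
		if local >= 100 { // stand-in for the gcCreditSlack flush
			flushed += local
			local = 0
		}
	}
	return flushed + local // equals the sum of deltas exactly
}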
  1252  
  1253  // scanblock scans b as scanobject would, but using an explicit
  1254  // pointer bitmap instead of the heap bitmap.
  1255  //
  1256  // This is used to scan non-heap roots, so it does not update
  1257  // gcw.bytesMarked or gcw.scanWork.
  1258  //
  1259  // If stk != nil, possible stack pointers are also reported to stk.putPtr.
  1260  //go:nowritebarrier
  1261  func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
  1262  	// Use local copies of original parameters, so that a stack trace
  1263  	// due to one of the throws below shows the original block
  1264  	// base and extent.
  1265  	b := b0
  1266  	n := n0
  1267  
  1268  	for i := uintptr(0); i < n; {
  1269  		// Find bits for the next word.
  1270  		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
  1271  		if bits == 0 {
  1272  			i += sys.PtrSize * 8
  1273  			continue
  1274  		}
  1275  		for j := 0; j < 8 && i < n; j++ {
  1276  			if bits&1 != 0 {
  1277  				// Same work as in scanobject; see comments there.
  1278  				p := *(*uintptr)(unsafe.Pointer(b + i))
  1279  				if p != 0 {
  1280  					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
  1281  						greyobject(obj, b, i, span, gcw, objIndex)
  1282  					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
  1283  						stk.putPtr(p, false)
  1284  					}
  1285  				}
  1286  			}
  1287  			bits >>= 1
  1288  			i += sys.PtrSize
  1289  		}
  1290  	}
  1291  }
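
The ptrmask addressing above packs one bit per pointer-sized word: word w of the block lives in byte w/8 of the mask, at bit w%8. A one-function sketch of that lookup:

func ptrmaskBitSketch(ptrmask []uint8, word uintptr) bool {
	return ptrmask[word/8]>>(word%8)&1 != 0
}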
  1292  
  1293  // scanobject scans the object starting at b, adding pointers to gcw.
  1294  // b must point to the beginning of a heap object or an oblet.
  1295  // scanobject consults the GC bitmap for the pointer mask and the
  1296  // spans for the size of the object.
  1297  //
  1298  //go:nowritebarrier
  1299  func scanobject(b uintptr, gcw *gcWork) {
  1300  	// Find the bits for b and the size of the object at b.
  1301  	//
  1302  	// b is either the beginning of an object, in which case this
  1303  	// is the size of the object to scan, or it points to an
  1304  	// oblet, in which case we compute the size to scan below.
  1305  	hbits := heapBitsForAddr(b)
  1306  	s := spanOfUnchecked(b)
  1307  	n := s.elemsize
  1308  	if n == 0 {
  1309  		throw("scanobject n == 0")
  1310  	}
  1311  
  1312  	if n > maxObletBytes {
  1313  		// Large object. Break into oblets for better
  1314  		// parallelism and lower latency.
  1315  		if b == s.base() {
  1316  			// It's possible this is a noscan object (not
  1317  			// from greyobject, but from other code
  1318  			// paths), in which case we must *not* enqueue
  1319  			// oblets since their bitmaps will be
  1320  			// uninitialized.
  1321  			if s.spanclass.noscan() {
  1322  				// Bypass the whole scan.
  1323  				gcw.bytesMarked += uint64(n)
  1324  				return
  1325  			}
  1326  
  1327  			// Enqueue the other oblets to scan later.
  1328  			// Some oblets may be in b's scalar tail, but
  1329  			// these will be marked as "no more pointers",
  1330  			// so we'll drop out immediately when we go to
  1331  			// scan those.
  1332  			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
  1333  				if !gcw.putFast(oblet) {
  1334  					gcw.put(oblet)
  1335  				}
  1336  			}
  1337  		}
  1338  
  1339  		// Compute the size of the oblet. Since this object
  1340  		// must be a large object, s.base() is the beginning
  1341  		// of the object.
  1342  		n = s.base() + s.elemsize - b
  1343  		if n > maxObletBytes {
  1344  			n = maxObletBytes
  1345  		}
  1346  	}
  1347  
  1348  	var i uintptr
  1349  	for i = 0; i < n; i += sys.PtrSize {
  1350  		// Find bits for this word.
  1351  		if i != 0 {
  1352  			// Avoid needless hbits.next() on last iteration.
  1353  			hbits = hbits.next()
  1354  		}
  1355  		// Load bits once. See CL 22712 and issue 16973 for discussion.
  1356  		bits := hbits.bits()
  1357  		// During checkmarking, 1-word objects store the checkmark
  1358  		// in the type bit for the one word. The only one-word objects
  1359  		// are pointers, or else they'd be merged with other non-pointer
  1360  		// data into larger allocations.
  1361  		if i != 1*sys.PtrSize && bits&bitScan == 0 {
  1362  			break // no more pointers in this object
  1363  		}
  1364  		if bits&bitPointer == 0 {
  1365  			continue // not a pointer
  1366  		}
  1367  
  1368  		// Work here is duplicated in scanblock and above.
  1369  		// If you make changes here, make changes there too.
  1370  		obj := *(*uintptr)(unsafe.Pointer(b + i))
  1371  
  1372  		// At this point we have extracted the next potential pointer.
  1373  		// Quickly filter out nil and pointers back to the current object.
  1374  		if obj != 0 && obj-b >= n {
  1375  			// Test if obj points into the Go heap and, if so,
  1376  			// mark the object.
  1377  			//
  1378  			// Note that it's possible for findObject to
  1379  			// fail if obj points to a just-allocated heap
  1380  			// object because of a race with growing the
  1381  			// heap. In this case, we know the object was
  1382  			// just allocated and hence will be marked by
  1383  			// allocation itself.
  1384  			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
  1385  				greyobject(obj, b, i, span, gcw, objIndex)
  1386  			}
  1387  		}
  1388  	}
  1389  	gcw.bytesMarked += uint64(n)
  1390  	gcw.scanWork += int64(i)
  1391  }
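
The oblet split above scans [b, b+maxObletBytes) inline and enqueues the remaining starting addresses. A standalone sketch of which oblet starts get queued for a large object:

func obletStartsSketch(base, size uintptr) []uintptr {
	const maxObletBytes = 128 << 10
	var starts []uintptr
	for oblet := base + maxObletBytes; oblet < base+size; oblet += maxObletBytes {
		starts = append(starts, oblet) // scanned later, possibly by another P
	}
	return starts
}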
  1392  
  1393  // scanConservative scans block [b, b+n) conservatively, treating any
  1394  // pointer-like value in the block as a pointer.
  1395  //
  1396  // If ptrmask != nil, only words that are marked in ptrmask are
  1397  // considered as potential pointers.
  1398  //
  1399  // If state != nil, it's assumed that [b, b+n) is a block in the stack
  1400  // and may contain pointers to stack objects.
  1401  func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
  1402  	if debugScanConservative {
  1403  		printlock()
  1404  		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
  1405  		hexdumpWords(b, b+n, func(p uintptr) byte {
  1406  			if ptrmask != nil {
  1407  				word := (p - b) / sys.PtrSize
  1408  				bits := *addb(ptrmask, word/8)
  1409  				if (bits>>(word%8))&1 == 0 {
  1410  					return '$'
  1411  				}
  1412  			}
  1413  
  1414  			val := *(*uintptr)(unsafe.Pointer(p))
  1415  			if state != nil && state.stack.lo <= val && val < state.stack.hi {
  1416  				return '@'
  1417  			}
  1418  
  1419  			span := spanOfHeap(val)
  1420  			if span == nil {
  1421  				return ' '
  1422  			}
  1423  			idx := span.objIndex(val)
  1424  			if span.isFree(idx) {
  1425  				return ' '
  1426  			}
  1427  			return '*'
  1428  		})
  1429  		printunlock()
  1430  	}
  1431  
  1432  	for i := uintptr(0); i < n; i += sys.PtrSize {
  1433  		if ptrmask != nil {
  1434  			word := i / sys.PtrSize
  1435  			bits := *addb(ptrmask, word/8)
  1436  			if bits == 0 {
  1437  				// Skip 8 words (the loop increment will do the 8th)
  1438  				//
  1439  				// This must be the first time we've
  1440  				// seen this word of ptrmask, so i
  1441  				// must be 8-word-aligned, but check
  1442  				// our reasoning just in case.
  1443  				if i%(sys.PtrSize*8) != 0 {
  1444  					throw("misaligned mask")
  1445  				}
  1446  				i += sys.PtrSize*8 - sys.PtrSize
  1447  				continue
  1448  			}
  1449  			if (bits>>(word%8))&1 == 0 {
  1450  				continue
  1451  			}
  1452  		}
  1453  
  1454  		val := *(*uintptr)(unsafe.Pointer(b + i))
  1455  
  1456  		// Check if val points into the stack.
  1457  		if state != nil && state.stack.lo <= val && val < state.stack.hi {
  1458  			// val may point to a stack object. This
  1459  			// object may be dead from last cycle and
  1460  			// hence may contain pointers to unallocated
  1461  			// objects, but unlike heap objects we can't
  1462  			// tell if it's already dead. Hence, if all
  1463  			// pointers to this object are from
  1464  			// conservative scanning, we have to scan it
  1465  			// defensively, too.
  1466  			state.putPtr(val, true)
  1467  			continue
  1468  		}
  1469  
  1470  		// Check if val points to a heap span.
  1471  		span := spanOfHeap(val)
  1472  		if span == nil {
  1473  			continue
  1474  		}
  1475  
  1476  		// Check if val points to an allocated object.
  1477  		idx := span.objIndex(val)
  1478  		if span.isFree(idx) {
  1479  			continue
  1480  		}
  1481  
  1482  		// val points to an allocated object. Mark it.
  1483  		obj := span.base() + idx*span.elemsize
  1484  		greyobject(obj, b, i, span, gcw, idx)
  1485  	}
  1486  }
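
// Illustrative sketch, not part of the original source: the ptrmask
// indexing that appears twice above. Each ptrmask bit covers one
// pointer-sized word of the block, so word w maps to bit w%8 of byte
// w/8. The helper name wordIsPointer is hypothetical; addb is the
// runtime's byte-pointer arithmetic helper used above.
func wordIsPointer(ptrmask *uint8, word uintptr) bool {
	bits := *addb(ptrmask, word/8)
	return (bits>>(word%8))&1 != 0
}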
  1487  
  1488  // Shade the object if it isn't already.
  1489  // The object is not nil and known to be in the heap.
  1490  // Preemption must be disabled.
  1491  //go:nowritebarrier
  1492  func shade(b uintptr) {
  1493  	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
  1494  		gcw := &getg().m.p.ptr().gcw
  1495  		greyobject(obj, 0, 0, span, gcw, objIndex)
  1496  	}
  1497  }
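
// Illustrative note, not part of the original source: shade is the
// building block of the hybrid write barrier. Per the commentary in
// mbarrier.go, the barrier behaves like this pseudocode:
//
//	writePointer(slot, ptr):
//		shade(*slot)
//		if current stack is grey:
//			shade(ptr)
//		*slot = ptr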
  1498  
  1499  // obj is the start of an object with mark mbits.
  1500  // If it isn't already marked, mark it and enqueue into gcw.
  1501  // base and off are for debugging only and could be removed.
  1502  //
  1503  // See also wbBufFlush1, which partially duplicates this logic.
  1504  //
  1505  //go:nowritebarrierrec
  1506  func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
  1507  	// obj should be start of allocation, and so must be at least pointer-aligned.
  1508  	if obj&(sys.PtrSize-1) != 0 {
  1509  		throw("greyobject: obj not pointer-aligned")
  1510  	}
  1511  	mbits := span.markBitsForIndex(objIndex)
  1512  
  1513  	if useCheckmark {
  1514  		if !mbits.isMarked() {
  1515  			printlock()
  1516  			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
  1517  			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
  1518  
  1519  			// Dump the source (base) object
  1520  			gcDumpObject("base", base, off)
  1521  
  1522  			// Dump the object
  1523  			gcDumpObject("obj", obj, ^uintptr(0))
  1524  
  1525  			getg().m.traceback = 2
  1526  			throw("checkmark found unmarked object")
  1527  		}
  1528  		hbits := heapBitsForAddr(obj)
  1529  		if hbits.isCheckmarked(span.elemsize) {
  1530  			return
  1531  		}
  1532  		hbits.setCheckmarked(span.elemsize)
  1533  		if !hbits.isCheckmarked(span.elemsize) {
  1534  			throw("setCheckmarked and isCheckmarked disagree")
  1535  		}
  1536  	} else {
  1537  		if debug.gccheckmark > 0 && span.isFree(objIndex) {
  1538  			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
  1539  			gcDumpObject("base", base, off)
  1540  			gcDumpObject("obj", obj, ^uintptr(0))
  1541  			getg().m.traceback = 2
  1542  			throw("marking free object")
  1543  		}
  1544  
  1545  		// If marked we have nothing to do.
  1546  		if mbits.isMarked() {
  1547  			return
  1548  		}
  1549  		mbits.setMarked()
  1550  
  1551  		// Mark span.
  1552  		arena, pageIdx, pageMask := pageIndexOf(span.base())
  1553  		if arena.pageMarks[pageIdx]&pageMask == 0 {
  1554  			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
  1555  		}
  1556  
  1557  		// If this is a noscan object, fast-track it to black
  1558  		// instead of greying it.
  1559  		if span.spanclass.noscan() {
  1560  			gcw.bytesMarked += uint64(span.elemsize)
  1561  			return
  1562  		}
  1563  	}
  1564  
  1565  	// Queue the obj for scanning. The PREFETCH(obj) logic has been
  1566  	// removed, but it seems like a worthwhile optimization to add back
  1567  	// in: there needs to be time between the PREFETCH and the use, so
  1568  	// previously the obj was put in an 8-element buffer drained at a
  1569  	// rate that gave the PREFETCH time to do its work. PREFETCHNTA
  1570  	// might be more appropriate than PREFETCH.
  1571  	if !gcw.putFast(obj) {
  1572  		gcw.put(obj)
  1573  	}
  1574  }
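
// Illustrative note, not part of the original source: the putFast/put
// pairing above is a fast-path/slow-path split. putFast only appends
// to the gcWork's currently cached buffer and reports false when that
// buffer is full; put then handles exchanging buffers with the global
// work lists. The generic shape of the pattern:
//
//	if !buf.putFast(x) { // cheap, inlineable common case
//		buf.put(x) // slow path: swap in a fresh buffer, then store
//	}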
  1575  
  1576  // gcDumpObject dumps the contents of obj for debugging and marks the
  1577  // field at byte offset off in obj.
  1578  func gcDumpObject(label string, obj, off uintptr) {
  1579  	s := spanOf(obj)
  1580  	print(label, "=", hex(obj))
  1581  	if s == nil {
  1582  		print(" s=nil\n")
  1583  		return
  1584  	}
  1585  	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
  1586  	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
  1587  		print(mSpanStateNames[state], "\n")
  1588  	} else {
  1589  		print("unknown(", state, ")\n")
  1590  	}
  1591  
  1592  	skipped := false
  1593  	size := s.elemsize
  1594  	if s.state.get() == mSpanManual && size == 0 {
  1595  		// We're printing something from a stack frame. We
  1596  		// don't know how big it is, so just show up to and
  1597  		// including off.
  1598  		size = off + sys.PtrSize
  1599  	}
  1600  	for i := uintptr(0); i < size; i += sys.PtrSize {
  1601  		// For big objects, just print the beginning (because
  1602  		// that usually hints at the object's type) and the
  1603  		// fields around off.
  1604  		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
  1605  			skipped = true
  1606  			continue
  1607  		}
  1608  		if skipped {
  1609  			print(" ...\n")
  1610  			skipped = false
  1611  		}
  1612  		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
  1613  		if i == off {
  1614  			print(" <==")
  1615  		}
  1616  		print("\n")
  1617  	}
  1618  	if skipped {
  1619  		print(" ...\n")
  1620  	}
  1621  }
  1622  
  1623  // gcmarknewobject marks a newly allocated object black. obj must
  1624  // not contain any non-nil pointers.
  1625  //
  1626  // This is nosplit so it can manipulate a gcWork without preemption.
  1627  //
  1628  //go:nowritebarrier
  1629  //go:nosplit
  1630  func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
  1631  	if useCheckmark { // The world should be stopped so this should not happen.
  1632  		throw("gcmarknewobject called while doing checkmark")
  1633  	}
  1634  
  1635  	// Mark object.
  1636  	objIndex := span.objIndex(obj)
  1637  	span.markBitsForIndex(objIndex).setMarked()
  1638  
  1639  	// Mark span.
  1640  	arena, pageIdx, pageMask := pageIndexOf(span.base())
  1641  	if arena.pageMarks[pageIdx]&pageMask == 0 {
  1642  		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
  1643  	}
  1644  
  1645  	gcw := &getg().m.p.ptr().gcw
  1646  	gcw.bytesMarked += uint64(size)
  1647  	gcw.scanWork += int64(scanSize)
  1648  }
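
// Illustrative note, not part of the original source: gcmarknewobject
// implements "allocate black". During the mark phase the allocator
// marks each new object immediately, so it cannot be freed this
// cycle, and since it holds no non-nil pointers yet there is nothing
// to scan. A sketch of the call site's shape in the allocator:
//
//	if gcphase != _GCoff {
//		gcmarknewobject(span, uintptr(x), size, scanSize)
//	}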
  1649  
  1650  // gcMarkTinyAllocs greys all active tiny alloc blocks.
  1651  //
  1652  // The world must be stopped.
  1653  func gcMarkTinyAllocs() {
  1654  	for _, p := range allp {
  1655  		c := p.mcache
  1656  		if c == nil || c.tiny == 0 {
  1657  			continue
  1658  		}
  1659  		_, span, objIndex := findObject(c.tiny, 0, 0)
  1660  		gcw := &p.gcw
  1661  		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
  1662  	}
  1663  }
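
// Illustrative note, not part of the original source: c.tiny is the base
// of the mcache's current tiny-allocation block (16 bytes), which can
// hold several live sub-objects behind a single mark. Greying the
// block keeps every suballocation in it alive, e.g.:
//
//	a := new(uint8) // a and b may be packed into the same tiny
//	b := new(uint8) // block; marking the block preserves both.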
  1664  
  1665  // Checkmarking
  1666  
  1667  // To help debug the concurrent GC we remark with the world
  1668  // stopped, ensuring that any object encountered has its normal
  1669  // mark bit set. To do this we use an orthogonal bit
  1670  // pattern to indicate the object is marked. The following pattern
  1671  // uses the upper two bits in the object's boundary nibble.
  1672  // 01: scalar  not marked
  1673  // 10: pointer not marked
  1674  // 11: pointer     marked
  1675  // 00: scalar      marked
  1676  // XORing with 01 flips the pattern from marked to unmarked and vice versa.
  1677  // The higher bit is 1 for pointers and 0 for scalars, whether the object
  1678  // is marked or not.
  1679  // The first nibble no longer holds the typeDead pattern indicating that
  1680  // there are no more pointers in the object. This information is held
  1681  // in the second nibble.
  1682  
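// Illustrative sketch, not part of the original source: the XOR flip
// described above, restated on the four two-bit patterns. The function
// name is hypothetical.
func toggleCheckmark(bits uint8) uint8 {
	// 01 (scalar, not marked)  <-> 00 (scalar, marked)
	// 10 (pointer, not marked) <-> 11 (pointer, marked)
	return bits ^ 0x1 // flip the mark state, preserve the pointer bit
}
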
  1683  // If useCheckmark is true, marking of an object uses the
  1684  // checkmark bits (encoding above) instead of the standard
  1685  // mark bits.
  1686  var useCheckmark = false
  1687  
  1688  //go:nowritebarrier
  1689  func initCheckmarks() {
  1690  	useCheckmark = true
  1691  	for _, s := range mheap_.allspans {
  1692  		if s.state.get() == mSpanInUse {
  1693  			heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
  1694  		}
  1695  	}
  1696  }
  1697  
  1698  func clearCheckmarks() {
  1699  	useCheckmark = false
  1700  	for _, s := range mheap_.allspans {
  1701  		if s.state.get() == mSpanInUse {
  1702  			heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
  1703  		}
  1704  	}
  1705  }
  1706  
