Source file src/runtime/mbitmap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: type and heap bitmaps.
     6  //
     7  // Stack, data, and bss bitmaps
     8  //
     9  // Stack frames and global variables in the data and bss sections are described
    10  // by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
    11  // to be visited during GC. The bits in each byte are consumed starting with
    12  // the low bit: 1<<0, 1<<1, and so on.
    13  //
    14  // Heap bitmap
    15  //
    16  // The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
    17  // stored in the heapArena metadata backing each heap arena.
    18  // That is, if ha is the heapArena for the arena starting at start,
    19  // then ha.bitmap[0] holds the 2-bit entries for the four words start
    20  // through start+3*ptrSize, ha.bitmap[1] holds the entries for
    21  // start+4*ptrSize through start+7*ptrSize, and so on.
    22  //
    23  // In each 2-bit entry, the lower bit holds the same information as in the 1-bit
    24  // bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
    25  // The meaning of the high bit depends on the position of the word being described
    26  // in its allocated object. In all words *except* the second word, the
    27  // high bit indicates that the object is still being described. In
    28  // these words, if a bit pair with a high bit 0 is encountered, the
    29  // low bit can also be assumed to be 0, and the object description is
    30  // over. This 00 is called the ``dead'' encoding: it signals that the
    31  // rest of the words in the object are uninteresting to the garbage
    32  // collector.
    33  //
    34  // In the second word, the high bit is the GC ``checkmarked'' bit (see below).
    35  //
    36  // The 2-bit entries are split when written into the byte, so that the top half
    37  // of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
    38  // bits.
    39  // This form allows a copy from the 1-bit to the 4-bit form to keep the
    40  // pointer bits contiguous, instead of having to space them out.
    41  //
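// For example, as an illustrative sketch of the layout just described (not
// part of the original comment): one bitmap byte b describes four heap words
// w0..w3 as
//
//	bit:  7  6  5  4   3  2  1  0
//	      s3 s2 s1 s0  p3 p2 p1 p0
//
// where pN = b&(bitPointer<<N) is word N's pointer bit and
// sN = b&(bitScan<<N) is its scan (or checkmark) bit.
//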
    42  // The code makes use of the fact that the zero value for a heap bitmap
    43  // has no live pointer bit set and is (depending on position), not used,
    44  // not checkmarked, and is the dead encoding.
    45  // These properties must be preserved when modifying the encoding.
    46  //
    47  // The bitmap for noscan spans is not maintained. Code must ensure
    48  // that an object is scannable before consulting its bitmap by
    49  // checking either the noscan bit in the span or by consulting its
    50  // type's information.
    51  //
    52  // Checkmarks
    53  //
    54  // In a concurrent garbage collector, one worries about failing to mark
    55  // a live object due to mutations without write barriers or bugs in the
    56  // collector implementation. As a sanity check, the GC has a 'checkmark'
    57  // mode that retraverses the object graph with the world stopped, to make
    58  // sure that everything that should be marked is marked.
    59  // In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
    60  // for the second word of the object holds the checkmark bit.
    61  // When not in checkmark mode, this bit is set to 1.
    62  //
    63  // The smallest possible allocation is 8 bytes. On a 32-bit machine, that
    64  // means every allocated object has two words, so there is room for the
    65  // checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
    66  // just one word, so the second bit pair is not available for encoding the
    67  // checkmark. However, because non-pointer allocations are combined
    68  // into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
    69  // must be a pointer, so the type bit in the first word is not actually needed.
    70  // It is still used in general, except in checkmark the type bit is repurposed
    71  // as the checkmark bit and then reinitialized (to 1) as the type bit when
    72  // finished.
    73  //
    74  
    75  package runtime
    76  
    77  import (
    78  	"runtime/internal/atomic"
    79  	"runtime/internal/sys"
    80  	"unsafe"
    81  )
    82  
    83  const (
    84  	bitPointer = 1 << 0
    85  	bitScan    = 1 << 4
    86  
    87  	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
    88  	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte
    89  
    90  	// all scan/pointer bits in a byte
    91  	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
    92  	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
    93  )
    94  
    95  // addb returns the byte pointer p+n.
    96  //go:nowritebarrier
    97  //go:nosplit
    98  func addb(p *byte, n uintptr) *byte {
    99  	// Note: wrote out full expression instead of calling add(p, n)
   100  	// to reduce the number of temporaries generated by the
   101  	// compiler for this trivial expression during inlining.
   102  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
   103  }
   104  
   105  // subtractb returns the byte pointer p-n.
   106  //go:nowritebarrier
   107  //go:nosplit
   108  func subtractb(p *byte, n uintptr) *byte {
   109  	// Note: wrote out full expression instead of calling add(p, -n)
   110  	// to reduce the number of temporaries generated by the
   111  	// compiler for this trivial expression during inlining.
   112  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
   113  }
   114  
   115  // add1 returns the byte pointer p+1.
   116  //go:nowritebarrier
   117  //go:nosplit
   118  func add1(p *byte) *byte {
   119  	// Note: wrote out full expression instead of calling addb(p, 1)
   120  	// to reduce the number of temporaries generated by the
   121  	// compiler for this trivial expression during inlining.
   122  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
   123  }
   124  
   125  // subtract1 returns the byte pointer p-1.
   126  //go:nowritebarrier
   127  //
   128  // nosplit because it is used during write barriers and must not be preempted.
   129  //go:nosplit
   130  func subtract1(p *byte) *byte {
   131  	// Note: wrote out full expression instead of calling subtractb(p, 1)
   132  	// to reduce the number of temporaries generated by the
   133  	// compiler for this trivial expression during inlining.
   134  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
   135  }
   136  
   137  // heapBits provides access to the bitmap bits for a single heap word.
   138  // The methods on heapBits take value receivers so that the compiler
   139  // can more easily inline calls to those methods and registerize the
   140  // struct fields independently.
   141  type heapBits struct {
   142  	bitp  *uint8
   143  	shift uint32
   144  	arena uint32 // Index of heap arena containing bitp
   145  	last  *uint8 // Last byte of the arena's bitmap
   146  }
   147  
   148  // Make the compiler check that heapBits.arena is large enough to hold
   149  // the maximum arena frame number.
   150  var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
   151  
   152  // markBits provides access to the mark bit for an object in the heap.
   153  // bytep points to the byte holding the mark bit.
   154  // mask is a byte with a single bit set that can be &ed with *bytep
   155  // to see if the bit has been set.
   156  // *m.bytep&m.mask != 0 indicates the mark bit is set.
   157  // index can be used along with span information to generate
   158  // the address of the object in the heap.
   159  // We maintain one set of mark bits for allocation and one for
   160  // marking purposes.
   161  type markBits struct {
   162  	bytep *uint8
   163  	mask  uint8
   164  	index uintptr
   165  }
   166  
   167  //go:nosplit
   168  func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
   169  	bytep, mask := s.allocBits.bitp(allocBitIndex)
   170  	return markBits{bytep, mask, allocBitIndex}
   171  }
   172  
   173  // refillAllocCache takes 8 bytes from s.allocBits starting at whichByte
   174  // and negates them so that ctz (count trailing zeros) instructions
   175  // can be used. It then places these 8 bytes into the cached 64 bit
   176  // s.allocCache.
   177  func (s *mspan) refillAllocCache(whichByte uintptr) {
   178  	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
   179  	aCache := uint64(0)
   180  	aCache |= uint64(bytes[0])
   181  	aCache |= uint64(bytes[1]) << (1 * 8)
   182  	aCache |= uint64(bytes[2]) << (2 * 8)
   183  	aCache |= uint64(bytes[3]) << (3 * 8)
   184  	aCache |= uint64(bytes[4]) << (4 * 8)
   185  	aCache |= uint64(bytes[5]) << (5 * 8)
   186  	aCache |= uint64(bytes[6]) << (6 * 8)
   187  	aCache |= uint64(bytes[7]) << (7 * 8)
   188  	s.allocCache = ^aCache
   189  }
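
// exampleAllocCacheLookup is an editorial sketch with hypothetical values
// (not runtime behavior): it shows why refillAllocCache negates the alloc
// bits. With alloc bits 0b0000_0111 (objects 0-2 allocated, the rest free),
// the negated cache has its lowest set bit at position 3, so Ctz64 returns
// the index of the first free object, which is what nextFreeIndex below
// relies on.
func exampleAllocCacheLookup() uintptr {
	allocBits := uint64(0x07)        // 0b0000_0111: 1 = allocated, 0 = free
	cache := ^allocBits              // negate so free slots become 1 bits
	return uintptr(sys.Ctz64(cache)) // == 3, the first free index
}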
   190  
   191  // nextFreeIndex returns the index of the next free object in s at
   192  // or after s.freeindex.
   193  // There are hardware instructions that can be used to make this
   194  // faster if profiling warrants it.
   195  func (s *mspan) nextFreeIndex() uintptr {
   196  	sfreeindex := s.freeindex
   197  	snelems := s.nelems
   198  	if sfreeindex == snelems {
   199  		return sfreeindex
   200  	}
   201  	if sfreeindex > snelems {
   202  		throw("s.freeindex > s.nelems")
   203  	}
   204  
   205  	aCache := s.allocCache
   206  
   207  	bitIndex := sys.Ctz64(aCache)
   208  	for bitIndex == 64 {
   209  		// Move index to start of next cached bits.
   210  		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
   211  		if sfreeindex >= snelems {
   212  			s.freeindex = snelems
   213  			return snelems
   214  		}
   215  		whichByte := sfreeindex / 8
   216  		// Refill s.allocCache with the next 64 alloc bits.
   217  		s.refillAllocCache(whichByte)
   218  		aCache = s.allocCache
   219  		bitIndex = sys.Ctz64(aCache)
   220  		// nothing available in cached bits
   221  		// grab the next 8 bytes and try again.
   222  	}
   223  	result := sfreeindex + uintptr(bitIndex)
   224  	if result >= snelems {
   225  		s.freeindex = snelems
   226  		return snelems
   227  	}
   228  
   229  	s.allocCache >>= uint(bitIndex + 1)
   230  	sfreeindex = result + 1
   231  
   232  	if sfreeindex%64 == 0 && sfreeindex != snelems {
   233  		// We just incremented s.freeindex so it isn't 0.
   234  		// As each 1 in s.allocCache was encountered and used for allocation
   235  		// it was shifted away. At this point s.allocCache contains all 0s.
   236  		// Refill s.allocCache so that it corresponds
   237  		// to the bits at s.allocBits starting at s.freeindex.
   238  		whichByte := sfreeindex / 8
   239  		s.refillAllocCache(whichByte)
   240  	}
   241  	s.freeindex = sfreeindex
   242  	return result
   243  }
   244  
   245  // isFree returns whether the index'th object in s is unallocated.
   246  func (s *mspan) isFree(index uintptr) bool {
   247  	if index < s.freeindex {
   248  		return false
   249  	}
   250  	bytep, mask := s.allocBits.bitp(index)
   251  	return *bytep&mask == 0
   252  }
   253  
   254  func (s *mspan) objIndex(p uintptr) uintptr {
   255  	byteOffset := p - s.base()
   256  	if byteOffset == 0 {
   257  		return 0
   258  	}
   259  	if s.baseMask != 0 {
   260  		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
   261  		return byteOffset >> s.divShift
   262  	}
   263  	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
   264  }
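
// objIndexMatchesDivision is an editorial sketch (hypothetical helper): the
// fast path above relies on precomputed "divide by multiply" magic
// (s.divShift, s.divMul, s.divShift2, derived from the size class tables) so
// that for every in-span byte offset the shifted multiplication equals a
// plain division by elemsize. This states that invariant directly.
func objIndexMatchesDivision(off, elemsize uintptr, divShift uint8, divMul uint32, divShift2 uint8) bool {
	fast := uintptr(((uint64(off) >> divShift) * uint64(divMul)) >> divShift2)
	return fast == off/elemsize
}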
   265  
   266  func markBitsForAddr(p uintptr) markBits {
   267  	s := spanOf(p)
   268  	objIndex := s.objIndex(p)
   269  	return s.markBitsForIndex(objIndex)
   270  }
   271  
   272  func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
   273  	bytep, mask := s.gcmarkBits.bitp(objIndex)
   274  	return markBits{bytep, mask, objIndex}
   275  }
   276  
   277  func (s *mspan) markBitsForBase() markBits {
   278  	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
   279  }
   280  
   281  // isMarked reports whether mark bit m is set.
   282  func (m markBits) isMarked() bool {
   283  	return *m.bytep&m.mask != 0
   284  }
   285  
   286  // setMarked sets the marked bit in the markbits, atomically. Some compilers
   287  // are not able to inline atomic.Or8 function so if it appears as a hot spot consider
   288  // inlining it manually.
   289  func (m markBits) setMarked() {
   290  	// Might be racing with other updates, so use atomic update always.
   291  	// We used to be clever here and use a non-atomic update in certain
   292  	// cases, but it's not worth the risk.
   293  	atomic.Or8(m.bytep, m.mask)
   294  }
   295  
   296  // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
   297  func (m markBits) setMarkedNonAtomic() {
   298  	*m.bytep |= m.mask
   299  }
   300  
   301  // clearMarked clears the marked bit in the markbits, atomically.
   302  func (m markBits) clearMarked() {
   303  	// Might be racing with other updates, so use atomic update always.
   304  	// We used to be clever here and use a non-atomic update in certain
   305  	// cases, but it's not worth the risk.
   306  	atomic.And8(m.bytep, ^m.mask)
   307  }
   308  
   309  // markBitsForSpan returns the markBits for the span base address base.
   310  func markBitsForSpan(base uintptr) (mbits markBits) {
   311  	mbits = markBitsForAddr(base)
   312  	if mbits.mask != 1 {
   313  		throw("markBitsForSpan: unaligned start")
   314  	}
   315  	return mbits
   316  }
   317  
   318  // advance advances the markBits to the next object in the span.
   319  func (m *markBits) advance() {
   320  	if m.mask == 1<<7 {
   321  		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
   322  		m.mask = 1
   323  	} else {
   324  		m.mask = m.mask << 1
   325  	}
   326  	m.index++
   327  }
   328  
   329  // heapBitsForAddr returns the heapBits for the address addr.
   330  // The caller must ensure addr is in an allocated span.
   331  // In particular, be careful not to point past the end of an object.
   332  //
   333  // nosplit because it is used during write barriers and must not be preempted.
   334  //go:nosplit
   335  func heapBitsForAddr(addr uintptr) (h heapBits) {
   336  	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
   337  	arena := arenaIndex(addr)
   338  	ha := mheap_.arenas[arena.l1()][arena.l2()]
   339  	// The compiler uses a load for nil checking ha, but in this
   340  	// case we'll almost never hit that cache line again, so it
   341  	// makes more sense to do a value check.
   342  	if ha == nil {
   343  		// addr is not in the heap. Return nil heapBits, which
   344  		// we expect to crash in the caller.
   345  		return
   346  	}
   347  	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
   348  	h.shift = uint32((addr / sys.PtrSize) & 3)
   349  	h.arena = uint32(arena)
   350  	h.last = &ha.bitmap[len(ha.bitmap)-1]
   351  	return
   352  }
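
// exampleBitmapIndex is an editorial sketch (hypothetical helper, not part of
// the original file): the index arithmetic from heapBitsForAddr in isolation.
// For a pointer-aligned addr, the bitmap byte within the arena and the bit
// pair within that byte are:
func exampleBitmapIndex(addr uintptr) (byteIdx uintptr, shift uint32) {
	byteIdx = (addr / (sys.PtrSize * 4)) % heapArenaBitmapBytes // 4 words per bitmap byte
	shift = uint32((addr / sys.PtrSize) & 3)                    // which pair within the byte
	return
}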
   353  
   354  // findObject returns the base address for the heap object containing
   355  // the address p, the object's span, and the index of the object in s.
   356  // If p does not point into a heap object, it returns base == 0.
   357  //
   358  // If p is an invalid heap pointer and debug.invalidptr != 0,
   359  // findObject panics.
   360  //
   361  // refBase and refOff optionally give the base address of the object
   362  // in which the pointer p was found and the byte offset at which it
   363  // was found. These are used for error reporting.
   364  func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
   365  	s = spanOf(p)
   366  	// If p is a bad pointer, it may not be in s's bounds.
   367  	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
   368  		if s == nil || s.state == _MSpanManual {
   369  			// If s is nil, the virtual address has never been part of the heap.
   370  			// This pointer may be to some mmap'd region, so we allow it.
   371  			// Pointers into stacks are also ok, the runtime manages these explicitly.
   372  			return
   373  		}
   374  
   375  		// The following ensures that we are rigorous about what data
   376  		// structures hold valid pointers.
   377  		if debug.invalidptr != 0 {
   378  			// Typically this indicates an incorrect use
   379  			// of unsafe or cgo to store a bad pointer in
   380  			// the Go heap. It may also indicate a runtime
   381  			// bug.
   382  			//
   383  			// TODO(austin): We could be more aggressive
   384  			// and detect pointers to unallocated objects
   385  			// in allocated spans.
   386  			printlock()
   387  			print("runtime: pointer ", hex(p))
   388  			if s.state != mSpanInUse {
   389  				print(" to unallocated span")
   390  			} else {
   391  				print(" to unused region of span")
   392  			}
   393  			print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
   394  			if refBase != 0 {
   395  				print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
   396  				gcDumpObject("object", refBase, refOff)
   397  			}
   398  			getg().m.traceback = 2
   399  			throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
   400  		}
   401  		return
   402  	}
   403  	// If this span holds object of a power of 2 size, just mask off the bits to
   404  	// the interior of the object. Otherwise use the size to get the base.
   405  	if s.baseMask != 0 {
   406  		// optimize for power of 2 sized objects.
   407  		base = s.base()
   408  		base = base + (p-base)&uintptr(s.baseMask)
   409  		objIndex = (base - s.base()) >> s.divShift
   410  		// base = p & s.baseMask is faster for small spans,
   411  		// but doesn't work for large spans.
   412  		// Overall, it's faster to use the more general computation above.
   413  	} else {
   414  		base = s.base()
   415  		if p-base >= s.elemsize {
   416  			// n := (p - base) / s.elemsize, using division by multiplication
   417  			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
   418  			base += objIndex * s.elemsize
   419  		}
   420  	}
   421  	return
   422  }
   423  
   424  // next returns the heapBits describing the next pointer-sized word in memory.
   425  // That is, if h describes address p, h.next() describes p+ptrSize.
   426  // Note that next does not modify h. The caller must record the result.
   427  //
   428  // nosplit because it is used during write barriers and must not be preempted.
   429  //go:nosplit
   430  func (h heapBits) next() heapBits {
   431  	if h.shift < 3*heapBitsShift {
   432  		h.shift += heapBitsShift
   433  	} else if h.bitp != h.last {
   434  		h.bitp, h.shift = add1(h.bitp), 0
   435  	} else {
   436  		// Move to the next arena.
   437  		return h.nextArena()
   438  	}
   439  	return h
   440  }
   441  
   442  // nextArena advances h to the beginning of the next heap arena.
   443  //
   444  // This is a slow-path helper to next. gc's inliner knows that
   445  // heapBits.next can be inlined even though it calls this. This is
   446  // marked noinline so it doesn't get inlined into next and cause next
   447  // to be too big to inline.
   448  //
   449  //go:nosplit
   450  //go:noinline
   451  func (h heapBits) nextArena() heapBits {
   452  	h.arena++
   453  	ai := arenaIdx(h.arena)
   454  	l2 := mheap_.arenas[ai.l1()]
   455  	if l2 == nil {
   456  		// We just passed the end of the object, which
   457  		// was also the end of the heap. Poison h. It
   458  		// should never be dereferenced at this point.
   459  		return heapBits{}
   460  	}
   461  	ha := l2[ai.l2()]
   462  	if ha == nil {
   463  		return heapBits{}
   464  	}
   465  	h.bitp, h.shift = &ha.bitmap[0], 0
   466  	h.last = &ha.bitmap[len(ha.bitmap)-1]
   467  	return h
   468  }
   469  
   470  // forward returns the heapBits describing n pointer-sized words ahead of h in memory.
   471  // That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
   472  // h.forward(1) is equivalent to h.next(), just slower.
   473  // Note that forward does not modify h. The caller must record the result.
   474  // bits returns the heap bits for the current word.
   475  //go:nosplit
   476  func (h heapBits) forward(n uintptr) heapBits {
   477  	n += uintptr(h.shift) / heapBitsShift
   478  	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
   479  	h.shift = uint32(n%4) * heapBitsShift
   480  	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
   481  		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
   482  		return h
   483  	}
   484  
   485  	// We're in a new heap arena.
   486  	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
   487  	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
   488  	ai := arenaIdx(h.arena)
   489  	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
   490  		a := l2[ai.l2()]
   491  		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
   492  		h.last = &a.bitmap[len(a.bitmap)-1]
   493  	} else {
   494  		h.bitp, h.last = nil, nil
   495  	}
   496  	return h
   497  }
   498  
   499  // forwardOrBoundary is like forward, but stops at boundaries between
   500  // contiguous sections of the bitmap. It returns the number of words
   501  // advanced over, which will be <= n.
   502  func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
   503  	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
   504  	if n > maxn {
   505  		n = maxn
   506  	}
   507  	return h.forward(n), n
   508  }
   509  
   510  // The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
   511  // The result includes in its higher bits the bits for subsequent words
   512  // described by the same bitmap byte.
   513  //
   514  // nosplit because it is used during write barriers and must not be preempted.
   515  //go:nosplit
   516  func (h heapBits) bits() uint32 {
   517  	// The (shift & 31) eliminates a test and conditional branch
   518  	// from the generated code.
   519  	return uint32(*h.bitp) >> (h.shift & 31)
   520  }
   521  
   522  // morePointers reports whether this word or any of the remaining words in this
   523  // object may contain a pointer (i.e., the object is still being described).
   524  // h must not describe the second word of the object.
   525  func (h heapBits) morePointers() bool {
   526  	return h.bits()&bitScan != 0
   527  }
   528  
   529  // isPointer reports whether the heap bits describe a pointer word.
   530  //
   531  // nosplit because it is used during write barriers and must not be preempted.
   532  //go:nosplit
   533  func (h heapBits) isPointer() bool {
   534  	return h.bits()&bitPointer != 0
   535  }
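
// countPointerWords is an editorial, simplified sketch (hypothetical helper;
// the real consumer of these accessors is scanobject in mgcmark.go): it walks
// the n words of an object starting at base, stopping at the dead encoding.
// The morePointers check is skipped for the second word, whose high bit is
// the checkmark rather than a scan bit.
func countPointerWords(base, n uintptr) int {
	count := 0
	h := heapBitsForAddr(base)
	for i := uintptr(0); i < n; i++ {
		if i != 1 && !h.morePointers() {
			break // dead encoding: no pointers in this or any later word
		}
		if h.isPointer() {
			count++
		}
		h = h.next()
	}
	return count
}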
   536  
   537  // isCheckmarked reports whether the heap bits have the checkmarked bit set.
   538  // It must be told how large the object at h is, because the encoding of the
   539  // checkmark bit varies by size.
   540  // h must describe the initial word of the object.
   541  func (h heapBits) isCheckmarked(size uintptr) bool {
   542  	if size == sys.PtrSize {
   543  		return (*h.bitp>>h.shift)&bitPointer != 0
   544  	}
   545  	// All multiword objects are 2-word aligned,
   546  	// so we know that the initial word's 2-bit pair
   547  	// and the second word's 2-bit pair are in the
   548  	// same heap bitmap byte, *h.bitp.
   549  	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
   550  }
   551  
   552  // setCheckmarked sets the checkmarked bit.
   553  // It must be told how large the object at h is, because the encoding of the
   554  // checkmark bit varies by size.
   555  // h must describe the initial word of the object.
   556  func (h heapBits) setCheckmarked(size uintptr) {
   557  	if size == sys.PtrSize {
   558  		atomic.Or8(h.bitp, bitPointer<<h.shift)
   559  		return
   560  	}
   561  	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
   562  }
   563  
   564  // bulkBarrierPreWrite executes a write barrier
   565  // for every pointer slot in the memory range [src, src+size),
   566  // using pointer/scalar information from [dst, dst+size).
   567  // This executes the write barriers necessary before a memmove.
   568  // src, dst, and size must be pointer-aligned.
   569  // The range [dst, dst+size) must lie within a single object.
   570  // It does not perform the actual writes.
   571  //
   572  // As a special case, src == 0 indicates that this is being used for a
   573  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   574  // barrier.
   575  //
   576  // Callers should call bulkBarrierPreWrite immediately before
   577  // calling memmove(dst, src, size). This function is marked nosplit
   578  // to avoid being preempted; the GC must not stop the goroutine
   579  // between the memmove and the execution of the barriers.
   580  // The caller is also responsible for cgo pointer checks if this
   581  // may be writing Go pointers into non-Go memory.
   582  //
   583  // The pointer bitmap is not maintained for allocations containing
   584  // no pointers at all; any caller of bulkBarrierPreWrite must first
   585  // make sure the underlying allocation contains pointers, usually
   586  // by checking typ.kind&kindNoPointers.
   587  //
   588  // Callers must perform cgo checks if writeBarrier.cgo.
   589  //
   590  //go:nosplit
   591  func bulkBarrierPreWrite(dst, src, size uintptr) {
   592  	if (dst|src|size)&(sys.PtrSize-1) != 0 {
   593  		throw("bulkBarrierPreWrite: unaligned arguments")
   594  	}
   595  	if !writeBarrier.needed {
   596  		return
   597  	}
   598  	if s := spanOf(dst); s == nil {
   599  		// If dst is a global, use the data or BSS bitmaps to
   600  		// execute write barriers.
   601  		for _, datap := range activeModules() {
   602  			if datap.data <= dst && dst < datap.edata {
   603  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   604  				return
   605  			}
   606  		}
   607  		for _, datap := range activeModules() {
   608  			if datap.bss <= dst && dst < datap.ebss {
   609  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   610  				return
   611  			}
   612  		}
   613  		return
   614  	} else if s.state != _MSpanInUse || dst < s.base() || s.limit <= dst {
   615  		// dst was heap memory at some point, but isn't now.
   616  		// It can't be a global. It must be either our stack,
   617  		// or in the case of direct channel sends, it could be
   618  		// another stack. Either way, no need for barriers.
   619  		// This will also catch if dst is in a freed span,
   620  		// though that should never happen.
   621  		return
   622  	}
   623  
   624  	buf := &getg().m.p.ptr().wbBuf
   625  	h := heapBitsForAddr(dst)
   626  	if src == 0 {
   627  		for i := uintptr(0); i < size; i += sys.PtrSize {
   628  			if h.isPointer() {
   629  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   630  				if !buf.putFast(*dstx, 0) {
   631  					wbBufFlush(nil, 0)
   632  				}
   633  			}
   634  			h = h.next()
   635  		}
   636  	} else {
   637  		for i := uintptr(0); i < size; i += sys.PtrSize {
   638  			if h.isPointer() {
   639  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   640  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   641  				if !buf.putFast(*dstx, *srcx) {
   642  					wbBufFlush(nil, 0)
   643  				}
   644  			}
   645  			h = h.next()
   646  		}
   647  	}
   648  }
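
// exampleTypedCopy is an editorial sketch of the calling convention described
// above (a hypothetical helper; the real callers are typedmemmove and related
// functions in mbarrier.go): the barriers must immediately precede the
// memmove, with no preemption in between, and are skipped for pointer-free
// types.
func exampleTypedCopy(typ *_type, dst, src unsafe.Pointer) {
	if writeBarrier.needed && typ.kind&kindNoPointers == 0 {
		// Only the pointer-containing prefix needs barriers.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
	}
	memmove(dst, src, typ.size)
}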
   649  
   650  // bulkBarrierBitmap executes write barriers for copying from [src,
   651  // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. dst is
   652  // assumed to start maskOffset bytes into the data covered by the
   653  // bitmap in bits (which may not be a multiple of 8).
   654  //
   655  // This is used by bulkBarrierPreWrite for writes to data and BSS.
   656  //
   657  //go:nosplit
   658  func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
   659  	word := maskOffset / sys.PtrSize
   660  	bits = addb(bits, word/8)
   661  	mask := uint8(1) << (word % 8)
   662  
   663  	buf := &getg().m.p.ptr().wbBuf
   664  	for i := uintptr(0); i < size; i += sys.PtrSize {
   665  		if mask == 0 {
   666  			bits = addb(bits, 1)
   667  			if *bits == 0 {
   668  				// Skip 8 words.
   669  				i += 7 * sys.PtrSize
   670  				continue
   671  			}
   672  			mask = 1
   673  		}
   674  		if *bits&mask != 0 {
   675  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   676  			if src == 0 {
   677  				if !buf.putFast(*dstx, 0) {
   678  					wbBufFlush(nil, 0)
   679  				}
   680  			} else {
   681  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   682  				if !buf.putFast(*dstx, *srcx) {
   683  					wbBufFlush(nil, 0)
   684  				}
   685  			}
   686  		}
   687  		mask <<= 1
   688  	}
   689  }
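
// exampleDataBitmapBit is an editorial sketch (hypothetical helper): the 1-bit
// bitmap addressing used above, for a word lying off bytes into the region the
// bitmap describes (8 words per bitmap byte, low bits first).
func exampleDataBitmapBit(bitmap *uint8, off uintptr) bool {
	word := off / sys.PtrSize        // word index into the region
	bytep := addb(bitmap, word/8)    // byte holding this word's bit
	return *bytep&(1<<(word%8)) != 0 // 1 = live pointer slot
}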
   690  
   691  // typeBitsBulkBarrier executes a write barrier for every
   692  // pointer that would be copied from [src, src+size) to [dst,
   693  // dst+size) by a memmove using the type bitmap to locate those
   694  // pointer slots.
   695  //
   696  // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
   697  // dst, src, and size must be pointer-aligned.
   698  // The type typ must have a plain bitmap, not a GC program.
   699  // The only use of this function is in channel sends, and the
   700  // 64 kB channel element limit takes care of this for us.
   701  //
   702  // Must not be preempted because it typically runs right before memmove,
   703  // and the GC must observe them as an atomic action.
   704  //
   705  // Callers must perform cgo checks if writeBarrier.cgo.
   706  //
   707  //go:nosplit
   708  func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
   709  	if typ == nil {
   710  		throw("runtime: typeBitsBulkBarrier without type")
   711  	}
   712  	if typ.size != size {
   713  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
   714  		throw("runtime: invalid typeBitsBulkBarrier")
   715  	}
   716  	if typ.kind&kindGCProg != 0 {
   717  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
   718  		throw("runtime: invalid typeBitsBulkBarrier")
   719  	}
   720  	if !writeBarrier.needed {
   721  		return
   722  	}
   723  	ptrmask := typ.gcdata
   724  	buf := &getg().m.p.ptr().wbBuf
   725  	var bits uint32
   726  	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
   727  		if i&(sys.PtrSize*8-1) == 0 {
   728  			bits = uint32(*ptrmask)
   729  			ptrmask = addb(ptrmask, 1)
   730  		} else {
   731  			bits = bits >> 1
   732  		}
   733  		if bits&1 != 0 {
   734  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   735  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   736  			if !buf.putFast(*dstx, *srcx) {
   737  				wbBufFlush(nil, 0)
   738  			}
   739  		}
   740  	}
   741  }
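
// exampleTypePointerWords is an editorial sketch (hypothetical helper): the
// same 1-bit gcdata walk performed by the loop above, counting the pointer
// words in a type's typ.ptrdata prefix. It assumes, as typeBitsBulkBarrier
// does, that the type has a plain bitmap rather than a GC program.
func exampleTypePointerWords(typ *_type) int {
	n := 0
	ptrmask := typ.gcdata
	for w := uintptr(0); w < typ.ptrdata/sys.PtrSize; w++ {
		if w != 0 && w%8 == 0 {
			ptrmask = addb(ptrmask, 1) // next byte every 8 words
		}
		if *ptrmask>>(w%8)&1 != 0 {
			n++ // word w of the type holds a pointer
		}
	}
	return n
}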
   742  
   743  // The methods operating on spans all require that h has been returned
   744  // by heapBitsForSpan and that size, n, total are the span layout description
   745  // returned by the mspan's layout method.
   746  // If total > size*n, it means that there is extra leftover memory in the span,
   747  // usually due to rounding.
   748  //
   749  // TODO(rsc): Perhaps introduce a different heapBitsSpan type.
   750  
   751  // initSpan initializes the heap bitmap for a span.
   752  // It clears all checkmark bits.
   753  // If this is a span of pointer-sized objects, it initializes all
   754  // words to pointer/scan.
   755  // Otherwise, it initializes all words to scalar/dead.
   756  func (h heapBits) initSpan(s *mspan) {
   757  	size, n, total := s.layout()
   758  
   759  	// Init the markbit structures
   760  	s.freeindex = 0
   761  	s.allocCache = ^uint64(0) // all 1s indicating all free.
   762  	s.nelems = n
   763  	s.allocBits = nil
   764  	s.gcmarkBits = nil
   765  	s.gcmarkBits = newMarkBits(s.nelems)
   766  	s.allocBits = newAllocBits(s.nelems)
   767  
   768  	// Clear bits corresponding to objects.
   769  	nw := total / sys.PtrSize
   770  	if nw%wordsPerBitmapByte != 0 {
   771  		throw("initSpan: unaligned length")
   772  	}
   773  	if h.shift != 0 {
   774  		throw("initSpan: unaligned base")
   775  	}
   776  	for nw > 0 {
   777  		hNext, anw := h.forwardOrBoundary(nw)
   778  		nbyte := anw / wordsPerBitmapByte
   779  		if sys.PtrSize == 8 && size == sys.PtrSize {
   780  			bitp := h.bitp
   781  			for i := uintptr(0); i < nbyte; i++ {
   782  				*bitp = bitPointerAll | bitScanAll
   783  				bitp = add1(bitp)
   784  			}
   785  		} else {
   786  			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
   787  		}
   788  		h = hNext
   789  		nw -= anw
   790  	}
   791  }
   792  
   793  // initCheckmarkSpan initializes a span for being checkmarked.
   794  // It clears the checkmark bits, which are set to 1 in normal operation.
   795  func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
   796  	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
   797  	if sys.PtrSize == 8 && size == sys.PtrSize {
   798  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   799  		// Only possible on 64-bit system, since minimum size is 8.
   800  		// Must clear type bit (checkmark bit) of every word.
   801  		// The type bit is the lower of every two-bit pair.
   802  		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
   803  			*h.bitp &^= bitPointerAll
   804  			h = h.forward(wordsPerBitmapByte)
   805  		}
   806  		return
   807  	}
   808  	for i := uintptr(0); i < n; i++ {
   809  		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
   810  		h = h.forward(size / sys.PtrSize)
   811  	}
   812  }
   813  
   814  // clearCheckmarkSpan undoes all the checkmarking in a span.
   815  // The actual checkmark bits are ignored, so the only work to do
   816  // is to fix the pointer bits. (Pointer bits are ignored by scanobject
   817  // but consulted by typedmemmove.)
   818  func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
   819  	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
   820  	if sys.PtrSize == 8 && size == sys.PtrSize {
   821  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   822  		// Only possible on 64-bit system, since minimum size is 8.
   823  		// Must clear type bit (checkmark bit) of every word.
   824  		// The type bit is the lower of every two-bit pair.
   825  		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
   826  			*h.bitp |= bitPointerAll
   827  			h = h.forward(wordsPerBitmapByte)
   828  		}
   829  	}
   830  }
   831  
   832  // oneBitCount is indexed by byte and produces the
   833  // number of 1 bits in that byte. For example 128 has 1 bit set
   834  // and oneBitCount[128] holds 1.
   835  var oneBitCount = [256]uint8{
   836  	0, 1, 1, 2, 1, 2, 2, 3,
   837  	1, 2, 2, 3, 2, 3, 3, 4,
   838  	1, 2, 2, 3, 2, 3, 3, 4,
   839  	2, 3, 3, 4, 3, 4, 4, 5,
   840  	1, 2, 2, 3, 2, 3, 3, 4,
   841  	2, 3, 3, 4, 3, 4, 4, 5,
   842  	2, 3, 3, 4, 3, 4, 4, 5,
   843  	3, 4, 4, 5, 4, 5, 5, 6,
   844  	1, 2, 2, 3, 2, 3, 3, 4,
   845  	2, 3, 3, 4, 3, 4, 4, 5,
   846  	2, 3, 3, 4, 3, 4, 4, 5,
   847  	3, 4, 4, 5, 4, 5, 5, 6,
   848  	2, 3, 3, 4, 3, 4, 4, 5,
   849  	3, 4, 4, 5, 4, 5, 5, 6,
   850  	3, 4, 4, 5, 4, 5, 5, 6,
   851  	4, 5, 5, 6, 5, 6, 6, 7,
   852  	1, 2, 2, 3, 2, 3, 3, 4,
   853  	2, 3, 3, 4, 3, 4, 4, 5,
   854  	2, 3, 3, 4, 3, 4, 4, 5,
   855  	3, 4, 4, 5, 4, 5, 5, 6,
   856  	2, 3, 3, 4, 3, 4, 4, 5,
   857  	3, 4, 4, 5, 4, 5, 5, 6,
   858  	3, 4, 4, 5, 4, 5, 5, 6,
   859  	4, 5, 5, 6, 5, 6, 6, 7,
   860  	2, 3, 3, 4, 3, 4, 4, 5,
   861  	3, 4, 4, 5, 4, 5, 5, 6,
   862  	3, 4, 4, 5, 4, 5, 5, 6,
   863  	4, 5, 5, 6, 5, 6, 6, 7,
   864  	3, 4, 4, 5, 4, 5, 5, 6,
   865  	4, 5, 5, 6, 5, 6, 6, 7,
   866  	4, 5, 5, 6, 5, 6, 6, 7,
   867  	5, 6, 6, 7, 6, 7, 7, 8}
   868  
   869  // countAlloc returns the number of objects allocated in span s by
   870  // scanning the mark bitmap, which records the allocated objects at sweep time.
   871  // TODO:(rlh) Use popcount intrinsic.
   872  func (s *mspan) countAlloc() int {
   873  	count := 0
   874  	maxIndex := s.nelems / 8
   875  	for i := uintptr(0); i < maxIndex; i++ {
   876  		mrkBits := *s.gcmarkBits.bytep(i)
   877  		count += int(oneBitCount[mrkBits])
   878  	}
   879  	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
   880  		mrkBits := *s.gcmarkBits.bytep(maxIndex)
   881  		mask := uint8((1 << bitsInLastByte) - 1)
   882  		bits := mrkBits & mask
   883  		count += int(oneBitCount[bits])
   884  	}
   885  	return count
   886  }
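
// examplePopcount8 is an editorial aside on the TODO above (a sketch, not a
// drop-in replacement): a branch-free SWAR population count for one byte,
// the kind of computation a popcount intrinsic would replace.
func examplePopcount8(b uint8) int {
	b = (b & 0x55) + ((b >> 1) & 0x55) // sum adjacent bits into 2-bit fields
	b = (b & 0x33) + ((b >> 2) & 0x33) // sum pairs into 4-bit fields
	return int((b & 0x0f) + (b >> 4))  // sum the two nibbles
}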
   887  
   888  // heapBitsSetType records that the new allocation [x, x+size)
   889  // holds in [x, x+dataSize) one or more values of type typ.
   890  // (The number of values is given by dataSize / typ.size.)
   891  // If dataSize < size, the fragment [x+dataSize, x+size) is
   892  // recorded as non-pointer data.
   893  // It is known that the type has pointers somewhere;
   894  // malloc does not call heapBitsSetType when there are no pointers,
   895  // because all free objects are marked as noscan during
   896  // heapBitsSweepSpan.
   897  //
   898  // There can only be one allocation from a given span active at a time,
   899  // and the bitmap for a span always falls on byte boundaries,
   900  // so there are no write-write races for access to the heap bitmap.
   901  // Hence, heapBitsSetType can access the bitmap without atomics.
   902  //
   903  // There can be read-write races between heapBitsSetType and things
   904  // that read the heap bitmap like scanobject. However, since
   905  // heapBitsSetType is only used for objects that have not yet been
   906  // made reachable, readers will ignore bits being modified by this
   907  // function. This does mean this function cannot transiently modify
   908  // bits that belong to neighboring objects. Also, on weakly-ordered
   909  // machines, callers must execute a store/store (publication) barrier
   910  // between calling this function and making the object reachable.
   911  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   912  	const doubleCheck = false // slow but helpful; enable to test modifications to this code
   913  
   914  	// dataSize is always size rounded up to the next malloc size class,
   915  	// except in the case of allocating a defer block, in which case
   916  	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
   917  	// arbitrarily larger.
   918  	//
   919  	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
   920  	// assume that dataSize == size without checking it explicitly.
   921  
   922  	if sys.PtrSize == 8 && size == sys.PtrSize {
   923  		// It's one word and it has pointers, it must be a pointer.
   924  		// Since all allocated one-word objects are pointers
   925  		// (non-pointers are aggregated into tinySize allocations),
   926  		// initSpan sets the pointer bits for us. Nothing to do here.
   927  		if doubleCheck {
   928  			h := heapBitsForAddr(x)
   929  			if !h.isPointer() {
   930  				throw("heapBitsSetType: pointer bit missing")
   931  			}
   932  			if !h.morePointers() {
   933  				throw("heapBitsSetType: scan bit missing")
   934  			}
   935  		}
   936  		return
   937  	}
   938  
   939  	h := heapBitsForAddr(x)
   940  	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
   941  
   942  	// Heap bitmap bits for a 2-word object are only 4 bits,
   943  	// so the bitmap byte is also shared with the objects next to it.
   944  	// This is called out as a special case primarily for 32-bit systems,
   945  	// so that on 32-bit systems the code below can assume all objects
   946  	// are 4-word aligned (because they're all 16-byte aligned).
   947  	if size == 2*sys.PtrSize {
   948  		if typ.size == sys.PtrSize {
   949  			// We're allocating a block big enough to hold two pointers.
   950  			// On 64-bit, that means the actual object must be two pointers,
   951  			// or else we'd have used the one-pointer-sized block.
   952  			// On 32-bit, however, this is the 8-byte block, the smallest one.
   953  			// So it could be that we're allocating one pointer and this was
   954  			// just the smallest block available. Distinguish by checking dataSize.
   955  			// (In general the number of instances of typ being allocated is
   956  			// dataSize/typ.size.)
   957  			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
   958  				// 1 pointer object. On 32-bit machines clear the bit for the
   959  				// unused second word.
   960  				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
   961  				*h.bitp |= (bitPointer | bitScan) << h.shift
   962  			} else {
   963  				// 2-element slice of pointer.
   964  				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
   965  			}
   966  			return
   967  		}
   968  		// Otherwise typ.size must be 2*sys.PtrSize,
   969  		// and typ.kind&kindGCProg == 0.
   970  		if doubleCheck {
   971  			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
   972  				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
   973  				throw("heapBitsSetType")
   974  			}
   975  		}
   976  		b := uint32(*ptrmask)
   977  		hb := (b & 3) | bitScan
   978  		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
   979  		// 110011 is shifted h.shift and complemented.
   980  		// This clears out the bits that are about to be
   981  		// ored into *h.bitp in the next instructions.
   982  		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
   983  		*h.bitp |= uint8(hb << h.shift)
   984  		return
   985  	}
   986  
   987  	// Copy from 1-bit ptrmask into 2-bit bitmap.
   988  	// The basic approach is to use a single uintptr as a bit buffer,
   989  	// alternating between reloading the buffer and writing bitmap bytes.
   990  	// In general, one load can supply two bitmap byte writes.
   991  	// This is a lot of lines of code, but it compiles into relatively few
   992  	// machine instructions.
   993  
   994  	outOfPlace := false
   995  	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
   996  		// This object spans heap arenas, so the bitmap may be
   997  		// discontiguous. Unroll it into the object instead
   998  		// and then copy it out.
   999  		//
  1000  		// In doubleCheck mode, we randomly do this anyway to
  1001  		// stress test the bitmap copying path.
  1002  		outOfPlace = true
  1003  		h.bitp = (*uint8)(unsafe.Pointer(x))
  1004  		h.last = nil
  1005  	}
  1006  
  1007  	var (
  1008  		// Ptrmask input.
  1009  		p     *byte   // last ptrmask byte read
  1010  		b     uintptr // ptrmask bits already loaded
  1011  		nb    uintptr // number of bits in b at next read
  1012  		endp  *byte   // final ptrmask byte to read (then repeat)
  1013  		endnb uintptr // number of valid bits in *endp
  1014  		pbits uintptr // alternate source of bits
  1015  
  1016  		// Heap bitmap output.
  1017  		w     uintptr // words processed
  1018  		nw    uintptr // number of words to process
  1019  		hbitp *byte   // next heap bitmap byte to write
  1020  		hb    uintptr // bits being prepared for *hbitp
  1021  	)
  1022  
  1023  	hbitp = h.bitp
  1024  
  1025  	// Handle GC program. Delayed until this part of the code
  1026  	// so that we can use the same double-checking mechanism
  1027  	// as the 1-bit case. Nothing above could have encountered
  1028  	// GC programs: the cases were all too small.
  1029  	if typ.kind&kindGCProg != 0 {
  1030  		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
  1031  		if doubleCheck {
  1032  			// Double-check the heap bits written by GC program
  1033  			// by running the GC program to create a 1-bit pointer mask
  1034  			// and then jumping to the double-check code below.
  1035  			// This doesn't catch bugs shared between the 1-bit and 4-bit
  1036  			// GC program execution, but it does catch mistakes specific
  1037  			// to just one of those and bugs in heapBitsSetTypeGCProg's
  1038  			// implementation of arrays.
  1039  			lock(&debugPtrmask.lock)
  1040  			if debugPtrmask.data == nil {
  1041  				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
  1042  			}
  1043  			ptrmask = debugPtrmask.data
  1044  			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
  1045  		}
  1046  		goto Phase4
  1047  	}
  1048  
  1049  	// Note about sizes:
  1050  	//
  1051  	// typ.size is the number of bytes in the object,
  1052  	// and typ.ptrdata is the number of bytes in the prefix
  1053  	// of the object that contains pointers. That is, the final
  1054  	// typ.size - typ.ptrdata bytes contain no pointers.
  1055  	// This allows optimization of a common pattern where
  1056  	// an object has a small header followed by a large scalar
  1057  	// buffer. If we know the pointers are over, we don't have
  1058  	// to scan the buffer's heap bitmap at all.
  1059  	// The 1-bit ptrmasks are sized to contain only bits for
  1060  	// the typ.ptrdata prefix, zero padded out to a full byte
  1061  	// of bitmap. This code sets nw (below) so that heap bitmap
  1062  	// bits are only written for the typ.ptrdata prefix; if there is
  1063  	// more room in the allocated object, the next heap bitmap
  1064  	// entry is a 00, indicating that there are no more pointers
  1065  	// to scan. So only the ptrmask for the ptrdata bytes is needed.
  1066  	//
  1067  	// Replicated copies are not as nice: if there is an array of
  1068  	// objects with scalar tails, all but the last tail does have to
  1069  	// be initialized, because there is no way to say "skip forward".
  1070  	// However, because of the possibility of a repeated type with
  1071  	// size not a multiple of 4 pointers (one heap bitmap byte),
  1072  	// the code already must handle the last ptrmask byte specially
  1073  	// by treating it as containing only the bits for endnb pointers,
  1074  	// where endnb <= 4. We represent large scalar tails that must
  1075  	// be expanded in the replication by setting endnb larger than 4.
  1076  	// This will have the effect of reading many bits out of b,
  1077  	// but once the real bits are shifted out, b will supply as many
  1078  	// zero bits as we try to read, which is exactly what we need.
  1079  
  1080  	p = ptrmask
  1081  	if typ.size < dataSize {
  1082  		// Filling in bits for an array of typ.
  1083  		// Set up for repetition of ptrmask during main loop.
  1084  		// Note that ptrmask describes only a prefix of typ (its typ.ptrdata pointer prefix).
  1085  		const maxBits = sys.PtrSize*8 - 7
  1086  		if typ.ptrdata/sys.PtrSize <= maxBits {
  1087  			// Entire ptrmask fits in uintptr with room for a byte fragment.
  1088  			// Load into pbits and never read from ptrmask again.
  1089  			// This is especially important when the ptrmask has
  1090  			// fewer than 8 bits in it; otherwise the reload in the middle
  1091  			// of the Phase 2 loop would itself need to loop to gather
  1092  			// at least 8 bits.
  1093  
  1094  			// Accumulate ptrmask into b.
  1095  			// ptrmask is sized to describe only typ.ptrdata, but we record
  1096  			// it as describing typ.size bytes, since all the high bits are zero.
  1097  			nb = typ.ptrdata / sys.PtrSize
  1098  			for i := uintptr(0); i < nb; i += 8 {
  1099  				b |= uintptr(*p) << i
  1100  				p = add1(p)
  1101  			}
  1102  			nb = typ.size / sys.PtrSize
  1103  
  1104  			// Replicate ptrmask to fill entire pbits uintptr.
  1105  			// Doubling and truncating is fewer steps than
  1106  			// iterating by nb each time. (nb could be 1.)
  1107  			// Since we loaded typ.ptrdata/sys.PtrSize bits
  1108  			// but are pretending to have typ.size/sys.PtrSize,
  1109  			// there might be no replication necessary/possible.
  1110  			pbits = b
  1111  			endnb = nb
  1112  			if nb+nb <= maxBits {
  1113  				for endnb <= sys.PtrSize*8 {
  1114  					pbits |= pbits << endnb
  1115  					endnb += endnb
  1116  				}
  1117  				// Truncate to a multiple of original ptrmask.
  1118  				// Because nb+nb <= maxBits, nb fits in a byte.
  1119  				// Byte division is cheaper than uintptr division.
  1120  				endnb = uintptr(maxBits/byte(nb)) * nb
  1121  				pbits &= 1<<endnb - 1
  1122  				b = pbits
  1123  				nb = endnb
  1124  			}
  1125  
  1126  			// Clear p and endp as sentinel for using pbits.
  1127  			// Checked during Phase 2 loop.
  1128  			p = nil
  1129  			endp = nil
  1130  		} else {
  1131  			// Ptrmask is larger. Read it multiple times.
  1132  			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
  1133  			endp = addb(ptrmask, n)
  1134  			endnb = typ.size/sys.PtrSize - n*8
  1135  		}
  1136  	}
  1137  	if p != nil {
  1138  		b = uintptr(*p)
  1139  		p = add1(p)
  1140  		nb = 8
  1141  	}
  1142  
  1143  	if typ.size == dataSize {
  1144  		// Single entry: can stop once we reach the non-pointer data.
  1145  		nw = typ.ptrdata / sys.PtrSize
  1146  	} else {
  1147  		// Repeated instances of typ in an array.
  1148  		// Have to process first N-1 entries in full, but can stop
  1149  		// once we reach the non-pointer data in the final entry.
  1150  		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1151  	}
  1152  	if nw == 0 {
  1153  		// No pointers! Caller was supposed to check.
  1154  		println("runtime: invalid type ", typ.string())
  1155  		throw("heapBitsSetType: called with non-pointer type")
  1156  		return
  1157  	}
  1158  	if nw < 2 {
  1159  		// Must write at least 2 words, because the "no scan"
  1160  		// encoding doesn't take effect until the third word.
  1161  		nw = 2
  1162  	}
  1163  
  1164  	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
  1165  	// The leading byte is special because it contains the bits for word 1,
  1166  	// which does not have the scan bit set.
  1167  	// The leading half-byte is special because it's a half a byte,
  1168  	// so we have to be careful with the bits already there.
  1169  	switch {
  1170  	default:
  1171  		throw("heapBitsSetType: unexpected shift")
  1172  
  1173  	case h.shift == 0:
  1174  		// Ptrmask and heap bitmap are aligned.
  1175  		// Handle first byte of bitmap specially.
  1176  		//
  1177  		// The first byte we write out covers the first four
  1178  		// words of the object. The scan/dead bit on the first
  1179  		// word must be set to scan since there are pointers
  1180  		// somewhere in the object. The scan/dead bit on the
  1181  		// second word is the checkmark, so we don't set it.
  1182  		// In all following words, we set the scan/dead
  1183  		// appropriately to indicate that the object continues
  1184  		// to the next 2-bit entry in the bitmap.
  1185  		//
  1186  		// TODO: It doesn't matter if we set the checkmark, so
  1187  		// maybe this case isn't needed any more.
  1188  		hb = b & bitPointerAll
  1189  		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
  1190  		if w += 4; w >= nw {
  1191  			goto Phase3
  1192  		}
  1193  		*hbitp = uint8(hb)
  1194  		hbitp = add1(hbitp)
  1195  		b >>= 4
  1196  		nb -= 4
  1197  
  1198  	case sys.PtrSize == 8 && h.shift == 2:
  1199  		// Ptrmask and heap bitmap are misaligned.
  1200  		// The bits for the first two words are in a byte shared
  1201  		// with another object, so we must be careful with the bits
  1202  		// already there.
  1203  		// We took care of 1-word and 2-word objects above,
  1204  		// so this is at least a 6-word object.
  1205  		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
  1206  		// This is not noscan, so set the scan bit in the
  1207  		// first word.
  1208  		hb |= bitScan << (2 * heapBitsShift)
  1209  		b >>= 2
  1210  		nb -= 2
  1211  		// Note: no bitScan for second word because that's
  1212  		// the checkmark.
  1213  		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
  1214  		*hbitp |= uint8(hb)
  1215  		hbitp = add1(hbitp)
  1216  		if w += 2; w >= nw {
  1217  			// We know that there is more data, because we handled 2-word objects above.
  1218  			// This must be at least a 6-word object. If we're out of pointer words,
  1219  			// mark no scan in next bitmap byte and finish.
  1220  			hb = 0
  1221  			w += 4
  1222  			goto Phase3
  1223  		}
  1224  	}
  1225  
  1226  	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
  1227  	// The loop computes the bits for that last write but does not execute the write;
  1228  	// it leaves the bits in hb for processing by phase 3.
  1229  	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
  1230  	// use in the first half of the loop right now, and then we only adjust nb explicitly
  1231  	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
  1232  	nb -= 4
  1233  	for {
  1234  		// Emit bitmap byte.
  1235  		// b has at least nb+4 bits, with one exception:
  1236  		// if w+4 >= nw, then b has only nw-w bits,
  1237  		// but we'll stop at the break and then truncate
  1238  		// appropriately in Phase 3.
  1239  		hb = b & bitPointerAll
  1240  		hb |= bitScanAll
  1241  		if w += 4; w >= nw {
  1242  			break
  1243  		}
  1244  		*hbitp = uint8(hb)
  1245  		hbitp = add1(hbitp)
  1246  		b >>= 4
  1247  
  1248  		// Load more bits. b has nb right now.
  1249  		if p != endp {
  1250  			// Fast path: keep reading from ptrmask.
  1251  			// nb unmodified: we just loaded 8 bits,
  1252  			// and the next iteration will consume 8 bits,
  1253  			// leaving us with the same nb the next time we're here.
  1254  			if nb < 8 {
  1255  				b |= uintptr(*p) << nb
  1256  				p = add1(p)
  1257  			} else {
  1258  				// Reduce the number of bits in b.
  1259  				// This is important if we skipped
  1260  				// over a scalar tail, since nb could
  1261  				// be larger than the bit width of b.
  1262  				nb -= 8
  1263  			}
  1264  		} else if p == nil {
  1265  			// Almost as fast path: track bit count and refill from pbits.
  1266  			// For short repetitions.
  1267  			if nb < 8 {
  1268  				b |= pbits << nb
  1269  				nb += endnb
  1270  			}
  1271  			nb -= 8 // for next iteration
  1272  		} else {
  1273  			// Slow path: reached end of ptrmask.
  1274  			// Process final partial byte and rewind to start.
  1275  			b |= uintptr(*p) << nb
  1276  			nb += endnb
  1277  			if nb < 8 {
  1278  				b |= uintptr(*ptrmask) << nb
  1279  				p = add1(ptrmask)
  1280  			} else {
  1281  				nb -= 8
  1282  				p = ptrmask
  1283  			}
  1284  		}
  1285  
  1286  		// Emit bitmap byte.
  1287  		hb = b & bitPointerAll
  1288  		hb |= bitScanAll
  1289  		if w += 4; w >= nw {
  1290  			break
  1291  		}
  1292  		*hbitp = uint8(hb)
  1293  		hbitp = add1(hbitp)
  1294  		b >>= 4
  1295  	}
  1296  
  1297  Phase3:
  1298  	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
  1299  	if w > nw {
  1300  		// Counting the 4 entries in hb not yet written to memory,
  1301  		// there are more entries than possible pointer slots.
  1302  		// Discard the excess entries (can't be more than 3).
  1303  		mask := uintptr(1)<<(4-(w-nw)) - 1
  1304  		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
  1305  	}
  1306  
  1307  	// Change nw from counting possibly-pointer words to total words in allocation.
  1308  	nw = size / sys.PtrSize
  1309  
  1310  	// Write whole bitmap bytes.
  1311  	// The first is hb, the rest are zero.
  1312  	if w <= nw {
  1313  		*hbitp = uint8(hb)
  1314  		hbitp = add1(hbitp)
  1315  		hb = 0 // for possible final half-byte below
  1316  		for w += 4; w <= nw; w += 4 {
  1317  			*hbitp = 0
  1318  			hbitp = add1(hbitp)
  1319  		}
  1320  	}
  1321  
  1322  	// Write final partial bitmap byte if any.
  1323  	// We know w > nw, or else we'd still be in the loop above.
  1324  	// It can be bigger only due to the 4 entries in hb that it counts.
  1325  	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
  1326  	// and can discard the 4 sitting in hb.
  1327  	// But if w == nw+2, we need to write the first two entries in hb.
  1328  	// The byte is shared with the next object, so be careful with
  1329  	// existing bits.
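        	// (bitPointer|bitScan == 0x11, so the mask below clears entries 0 and 1
        	// of the byte, that is, bits 0, 1, 4, and 5, and leaves the two entries
        	// that belong to the neighboring object untouched before OR-ing in hb.)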
  1330  	if w == nw+2 {
  1331  		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
  1332  	}
  1333  
  1334  Phase4:
  1335  	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
  1336  	if outOfPlace {
  1337  		// TODO: We could probably make this faster by
  1338  		// handling [x+dataSize, x+size) specially.
  1339  		h := heapBitsForAddr(x)
  1340  		// cnw is the number of heap words, or bit pairs
  1341  		// remaining (like nw above).
  1342  		cnw := size / sys.PtrSize
  1343  		src := (*uint8)(unsafe.Pointer(x))
  1344  		// We know the first and last byte of the bitmap are
  1345  		// not the same, but it's still possible for small
  1346  		// objects to span arenas, so they may share bitmap
  1347  		// bytes with neighboring objects.
  1348  		//
  1349  		// Handle the first byte specially if it's shared. See
  1350  		// Phase 1 for why this is the only special case we need.
  1351  		if doubleCheck {
  1352  			if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
  1353  				print("x=", x, " size=", size, " h.shift=", h.shift, "\n")
  1354  				throw("bad start shift")
  1355  			}
  1356  		}
  1357  		if sys.PtrSize == 8 && h.shift == 2 {
  1358  			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
  1359  			h = h.next().next()
  1360  			cnw -= 2
  1361  			src = addb(src, 1)
  1362  		}
  1363  		// We're now byte aligned. Copy out to per-arena
  1364  		// bitmaps until the last byte (which may again be
  1365  		// partial).
  1366  		for cnw >= 4 {
  1367  			// This loop processes four words at a time,
  1368  			// so round cnw down accordingly.
  1369  			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)
  1370  
  1371  			// n is the number of bitmap bytes to copy.
  1372  			n := words / 4
  1373  			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
  1374  			cnw -= words
  1375  			h = hNext
  1376  			src = addb(src, n)
  1377  		}
  1378  		if doubleCheck && h.shift != 0 {
  1379  			print("cnw=", cnw, " h.shift=", h.shift, "\n")
  1380  			throw("bad shift after block copy")
  1381  		}
  1382  		// Handle the last byte if it's shared.
  1383  		if cnw == 2 {
  1384  			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
  1385  			src = addb(src, 1)
  1386  			h = h.next().next()
  1387  		}
  1388  		if doubleCheck {
  1389  			if uintptr(unsafe.Pointer(src)) > x+size {
  1390  				throw("copy exceeded object size")
  1391  			}
  1392  			if !(cnw == 0 || cnw == 2) {
  1393  				print("x=", x, " size=", size, " cnw=", cnw, "\n")
  1394  				throw("bad number of remaining words")
  1395  			}
  1396  			// Set up hbitp so doubleCheck code below can check it.
  1397  			hbitp = h.bitp
  1398  		}
  1399  		// Zero the object where we wrote the bitmap.
  1400  		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
  1401  	}
  1402  
  1403  	// Double check the whole bitmap.
  1404  	if doubleCheck {
  1405  		// x+size may not point to the heap, so back up one
  1406  		// word and then call next().
  1407  		end := heapBitsForAddr(x + size - sys.PtrSize).next()
  1408  		endAI := arenaIdx(end.arena)
  1409  		if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
  1410  			// The unrolling code above walks hbitp just
  1411  			// past the bitmap without moving to the next
  1412  			// arena. Synthesize this for end.bitp.
  1413  			end.arena--
  1414  			endAI = arenaIdx(end.arena)
  1415  			end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
  1416  			end.last = nil
  1417  		}
  1418  		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
  1419  			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
  1420  			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1421  			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1422  			h0 := heapBitsForAddr(x)
  1423  			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1424  			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
  1425  			throw("bad heapBitsSetType")
  1426  		}
  1427  
  1428  		// Double-check that bits to be written were written correctly.
  1429  		// Does not check that other bits were not written, unfortunately.
  1430  		h := heapBitsForAddr(x)
  1431  		nptr := typ.ptrdata / sys.PtrSize
  1432  		ndata := typ.size / sys.PtrSize
  1433  		count := dataSize / typ.size
  1434  		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
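        		// totalptr counts pointer-bearing words: every element but the last
        		// contributes typ.size, the last only typ.ptrdata. For instance, with
        		// count = 3, typ.size = 32, and typ.ptrdata = 16 on a 64-bit system,
        		// totalptr = (2*32+16)/8 = 10 words.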
  1435  		for i := uintptr(0); i < size/sys.PtrSize; i++ {
  1436  			j := i % ndata
  1437  			var have, want uint8
  1438  			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
  1439  			if i >= totalptr {
  1440  				want = 0 // deadmarker
  1441  				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
  1442  					want = bitScan
  1443  				}
  1444  			} else {
  1445  				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
  1446  					want |= bitPointer
  1447  				}
  1448  				if i != 1 {
  1449  					want |= bitScan
  1450  				} else {
  1451  					have &^= bitScan
  1452  				}
  1453  			}
  1454  			if have != want {
  1455  				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
  1456  				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1457  				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
  1458  				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1459  				h0 := heapBitsForAddr(x)
  1460  				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1461  				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
  1462  				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
  1463  				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
  1464  				if typ.kind&kindGCProg != 0 {
  1465  					println("GC program:")
  1466  					dumpGCProg(addb(typ.gcdata, 4))
  1467  				}
  1468  				throw("bad heapBitsSetType")
  1469  			}
  1470  			h = h.next()
  1471  		}
  1472  		if ptrmask == debugPtrmask.data {
  1473  			unlock(&debugPtrmask.lock)
  1474  		}
  1475  	}
  1476  }
  1477  
  1478  var debugPtrmask struct {
  1479  	lock mutex
  1480  	data *byte
  1481  }
  1482  
  1483  // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
  1484  // progSize is the size of the memory described by the program.
  1485  // elemSize is the size of the element that the GC program describes (the program may cover only a prefix of the element).
  1486  // dataSize is the total size of the intended data, a multiple of elemSize.
  1487  // allocSize is the total size of the allocated memory.
  1488  //
  1489  // GC programs are only used for large allocations.
  1490  // heapBitsSetType requires that allocSize is a multiple of 4 words,
  1491  // so that the relevant bitmap bytes are not shared with surrounding
  1492  // objects.
  1493  func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
  1494  	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
  1495  		// Alignment will be wrong.
  1496  		throw("heapBitsSetTypeGCProg: small allocation")
  1497  	}
  1498  	var totalBits uintptr
  1499  	if elemSize == dataSize {
  1500  		totalBits = runGCProg(prog, nil, h.bitp, 2)
  1501  		if totalBits*sys.PtrSize != progSize {
  1502  			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
  1503  			throw("heapBitsSetTypeGCProg: unexpected bit count")
  1504  		}
  1505  	} else {
  1506  		count := dataSize / elemSize
  1507  
  1508  		// Piece together program trailer to run after prog that does:
  1509  		//	literal(0)
  1510  		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
  1511  		//	repeat(elemSize, count-1) // repeat that element for count
  1512  		// This zero-pads the data remaining in the first element and then
  1513  		// repeats that first element to fill the array.
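        		// As a concrete (hypothetical) illustration: with 8-byte pointers,
        		// elemSize = 64, progSize = 24, and count = 10, n below is 8-3 = 5,
        		// and the trailer works out to
        		//	0x01 0x00       literal(0): one zero bit
        		//	0x81 0x04       repeat(1, 4): four more zero bits (5 scalar words)
        		//	0x80 0x08 0x09  repeat(8, 9): replay the 8-word element 9 more times
        		//	0x00            stop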
  1514  		var trailer [40]byte // 3 varints (max 10 each) + some bytes
  1515  		i := 0
  1516  		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
  1517  			// literal(0)
  1518  			trailer[i] = 0x01
  1519  			i++
  1520  			trailer[i] = 0
  1521  			i++
  1522  			if n > 1 {
  1523  				// repeat(1, n-1)
  1524  				trailer[i] = 0x81
  1525  				i++
  1526  				n--
  1527  				for ; n >= 0x80; n >>= 7 {
  1528  					trailer[i] = byte(n | 0x80)
  1529  					i++
  1530  				}
  1531  				trailer[i] = byte(n)
  1532  				i++
  1533  			}
  1534  		}
  1535  		// repeat(elemSize/ptrSize, count-1)
  1536  		trailer[i] = 0x80
  1537  		i++
  1538  		n := elemSize / sys.PtrSize
  1539  		for ; n >= 0x80; n >>= 7 {
  1540  			trailer[i] = byte(n | 0x80)
  1541  			i++
  1542  		}
  1543  		trailer[i] = byte(n)
  1544  		i++
  1545  		n = count - 1
  1546  		for ; n >= 0x80; n >>= 7 {
  1547  			trailer[i] = byte(n | 0x80)
  1548  			i++
  1549  		}
  1550  		trailer[i] = byte(n)
  1551  		i++
  1552  		trailer[i] = 0
  1553  		i++
  1554  
  1555  		runGCProg(prog, &trailer[0], h.bitp, 2)
  1556  
  1557  		// Even though we filled in the full array just now,
  1558  		// record that we only filled in up to the ptrdata of the
  1559  		// last element. This will cause the code below to
  1560  		// memclr the dead section of the final array element,
  1561  		// so that scanobject can stop early in the final element.
  1562  		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
  1563  	}
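        	// For instance (hypothetical sizes): on a 64-bit system, allocSize = 256
        	// is 32 heap words, i.e. 8 bitmap bytes; a program producing totalBits = 10
        	// fills (10+3)/4 = 3 of them, so the memclr below zeroes the remaining
        	// 5 bytes, marking the tail of the allocation dead.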
  1564  	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
  1565  	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
  1566  	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
  1567  }
  1568  
  1569  // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
  1570  // size is the size of the region described by prog, in bytes.
  1571  // The resulting bitvector will have no more than size/sys.PtrSize bits.
  1572  func progToPointerMask(prog *byte, size uintptr) bitvector {
  1573  	n := (size/sys.PtrSize + 7) / 8
  1574  	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
  1575  	x[len(x)-1] = 0xa1 // overflow check sentinel
  1576  	n = runGCProg(prog, nil, &x[0], 1)
  1577  	if x[len(x)-1] != 0xa1 {
  1578  		throw("progToPointerMask: overflow")
  1579  	}
  1580  	return bitvector{int32(n), &x[0]}
  1581  }
  1582  
  1583  // Packed GC pointer bitmaps, aka GC programs.
  1584  //
  1585  // For large types containing arrays, the type information has a
  1586  // natural repetition that can be encoded to save space in the
  1587  // binary and in the memory representation of the type information.
  1588  //
  1589  // The encoding is a simple Lempel-Ziv style bytecode machine
  1590  // with the following instructions:
  1591  //
  1592  //	00000000: stop
  1593  //	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
  1594  //	10000000 n c: repeat the previous n bits c times; n, c are varints
  1595  //	1nnnnnnn c: repeat the previous n bits c times; c is a varint
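        //
        // For example, a 3-word element laid out as pointer, scalar, pointer and
        // repeated 10 times admits the (hypothetical) encoding
        //
        //	0x03 0x05    emit 3 bits from the next byte: 1, 0, 1
        //	0x83 0x09    repeat the previous 3 bits 9 more times (30 bits total)
        //	0x00         stop
        //
        // The compiler may of course choose a different encoding for the same layout.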
  1596  
  1597  // runGCProg executes the GC program prog, and then trailer if non-nil,
  1598  // writing to dst with entries of the given size.
  1599  // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
  1600  // If size == 2, dst is the 2-bit heap bitmap, also laid out moving forward from
  1601  // dst, with a scan bit written alongside each pointer bit. In this case, the caller
  1602  // guarantees that only whole bytes in dst need to be written.
  1603  //
  1604  // runGCProg returns the number of 1- or 2-bit entries written to memory.
  1605  func runGCProg(prog, trailer, dst *byte, size int) uintptr {
  1606  	dstStart := dst
  1607  
  1608  	// Bits waiting to be written to memory.
  1609  	var bits uintptr
  1610  	var nbits uintptr
  1611  
  1612  	p := prog
  1613  Run:
  1614  	for {
  1615  		// Flush accumulated full bytes.
  1616  		// The rest of the loop assumes that nbits <= 7.
  1617  		for ; nbits >= 8; nbits -= 8 {
  1618  			if size == 1 {
  1619  				*dst = uint8(bits)
  1620  				dst = add1(dst)
  1621  				bits >>= 8
  1622  			} else {
  1623  				v := bits&bitPointerAll | bitScanAll
  1624  				*dst = uint8(v)
  1625  				dst = add1(dst)
  1626  				bits >>= 4
  1627  				v = bits&bitPointerAll | bitScanAll
  1628  				*dst = uint8(v)
  1629  				dst = add1(dst)
  1630  				bits >>= 4
  1631  			}
  1632  		}
  1633  
  1634  		// Process one instruction.
  1635  		inst := uintptr(*p)
  1636  		p = add1(p)
  1637  		n := inst & 0x7F
  1638  		if inst&0x80 == 0 {
  1639  			// Literal bits; n == 0 means end of program.
  1640  			if n == 0 {
  1641  				// Program is over; continue in trailer if present.
  1642  				if trailer != nil {
  1643  					//println("trailer")
  1644  					p = trailer
  1645  					trailer = nil
  1646  					continue
  1647  				}
  1648  				//println("done")
  1649  				break Run
  1650  			}
  1651  			//println("lit", n, dst)
  1652  			nbyte := n / 8
  1653  			for i := uintptr(0); i < nbyte; i++ {
  1654  				bits |= uintptr(*p) << nbits
  1655  				p = add1(p)
  1656  				if size == 1 {
  1657  					*dst = uint8(bits)
  1658  					dst = add1(dst)
  1659  					bits >>= 8
  1660  				} else {
  1661  					v := bits&0xf | bitScanAll
  1662  					*dst = uint8(v)
  1663  					dst = add1(dst)
  1664  					bits >>= 4
  1665  					v = bits&0xf | bitScanAll
  1666  					*dst = uint8(v)
  1667  					dst = add1(dst)
  1668  					bits >>= 4
  1669  				}
  1670  			}
  1671  			if n %= 8; n > 0 {
  1672  				bits |= uintptr(*p) << nbits
  1673  				p = add1(p)
  1674  				nbits += n
  1675  			}
  1676  			continue Run
  1677  		}
  1678  
  1679  		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
  1680  		if n == 0 {
  1681  			for off := uint(0); ; off += 7 {
  1682  				x := uintptr(*p)
  1683  				p = add1(p)
  1684  				n |= (x & 0x7F) << off
  1685  				if x&0x80 == 0 {
  1686  					break
  1687  				}
  1688  			}
  1689  		}
  1690  
  1691  		// Count is encoded in a varint in the next bytes.
  1692  		c := uintptr(0)
  1693  		for off := uint(0); ; off += 7 {
  1694  			x := uintptr(*p)
  1695  			p = add1(p)
  1696  			c |= (x & 0x7F) << off
  1697  			if x&0x80 == 0 {
  1698  				break
  1699  			}
  1700  		}
  1701  		c *= n // now total number of bits to copy
  1702  
  1703  		// If the number of bits being repeated is small, load them
  1704  		// into a register and use that register for the entire loop
  1705  		// instead of repeatedly reading from memory.
  1706  		// Handling fewer than 8 bits here makes the general loop simpler.
  1707  		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
  1708  		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
  1709  		// it will not overflow.
  1710  		src := dst
  1711  		const maxBits = sys.PtrSize*8 - 7
  1712  		if n <= maxBits {
  1713  			// Start with bits in output buffer.
  1714  			pattern := bits
  1715  			npattern := nbits
  1716  
  1717  			// If we need more bits, fetch them from memory.
  1718  			if size == 1 {
  1719  				src = subtract1(src)
  1720  				for npattern < n {
  1721  					pattern <<= 8
  1722  					pattern |= uintptr(*src)
  1723  					src = subtract1(src)
  1724  					npattern += 8
  1725  				}
  1726  			} else {
  1727  				src = subtract1(src)
  1728  				for npattern < n {
  1729  					pattern <<= 4
  1730  					pattern |= uintptr(*src) & 0xf
  1731  					src = subtract1(src)
  1732  					npattern += 4
  1733  				}
  1734  			}
  1735  
  1736  			// We started with the whole bit output buffer,
  1737  			// and then we loaded bits from whole bytes.
  1738  			// Either way, we might now have too many instead of too few.
  1739  			// Discard the extra.
  1740  			if npattern > n {
  1741  				pattern >>= npattern - n
  1742  				npattern = n
  1743  			}
  1744  
  1745  			// Replicate pattern to at most maxBits.
  1746  			if npattern == 1 {
  1747  				// One bit being repeated.
  1748  				// If the bit is 1, make the pattern all 1s.
  1749  				// If the bit is 0, the pattern is already all 0s,
  1750  				// but we can claim that the number of bits
  1751  				// in the word is equal to the number we need (c),
  1752  				// because right shift of bits will zero fill.
  1753  				if pattern == 1 {
  1754  					pattern = 1<<maxBits - 1
  1755  					npattern = maxBits
  1756  				} else {
  1757  					npattern = c
  1758  				}
  1759  			} else {
  1760  				b := pattern
  1761  				nb := npattern
  1762  				if nb+nb <= maxBits {
  1763  					// Double pattern until the whole uintptr is filled.
  1764  					for nb <= sys.PtrSize*8 {
  1765  						b |= b << nb
  1766  						nb += nb
  1767  					}
  1768  					// Trim away incomplete copy of original pattern in high bits.
  1769  					// TODO(rsc): Replace with table lookup or loop on systems without divide?
  1770  					nb = maxBits / npattern * npattern
  1771  					b &= 1<<nb - 1
  1772  					pattern = b
  1773  					npattern = nb
  1774  				}
  1775  			}
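        			// For example: npattern == 3 and pattern == 0b101 double through
        			// 6, 12, 24, 48, and 96 bits, then get trimmed back to
        			// maxBits/3*3 == 57 bits (19 whole copies of the 3-bit pattern)
        			// on a 64-bit system.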
  1776  
  1777  			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
  1778  			// Since pattern contains >8 bits, there will be full bytes to flush
  1779  			// on each iteration.
  1780  			for ; c >= npattern; c -= npattern {
  1781  				bits |= pattern << nbits
  1782  				nbits += npattern
  1783  				if size == 1 {
  1784  					for nbits >= 8 {
  1785  						*dst = uint8(bits)
  1786  						dst = add1(dst)
  1787  						bits >>= 8
  1788  						nbits -= 8
  1789  					}
  1790  				} else {
  1791  					for nbits >= 4 {
  1792  						*dst = uint8(bits&0xf | bitScanAll)
  1793  						dst = add1(dst)
  1794  						bits >>= 4
  1795  						nbits -= 4
  1796  					}
  1797  				}
  1798  			}
  1799  
  1800  			// Add final fragment to bit buffer.
  1801  			if c > 0 {
  1802  				pattern &= 1<<c - 1
  1803  				bits |= pattern << nbits
  1804  				nbits += c
  1805  			}
  1806  			continue Run
  1807  		}
  1808  
  1809  		// Repeat; n too large to fit in a register.
  1810  		// Since nbits <= 7, we know the first few bytes of repeated data
  1811  		// are already written to memory.
  1812  		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
  1813  		if size == 1 {
  1814  			// Leading src fragment.
  1815  			src = subtractb(src, (off+7)/8)
  1816  			if frag := off & 7; frag != 0 {
  1817  				bits |= uintptr(*src) >> (8 - frag) << nbits
  1818  				src = add1(src)
  1819  				nbits += frag
  1820  				c -= frag
  1821  			}
  1822  			// Main loop: load one byte, write another.
  1823  			// The bits are rotating through the bit buffer.
  1824  			for i := c / 8; i > 0; i-- {
  1825  				bits |= uintptr(*src) << nbits
  1826  				src = add1(src)
  1827  				*dst = uint8(bits)
  1828  				dst = add1(dst)
  1829  				bits >>= 8
  1830  			}
  1831  			// Final src fragment.
  1832  			if c %= 8; c > 0 {
  1833  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1834  				nbits += c
  1835  			}
  1836  		} else {
  1837  			// Leading src fragment.
  1838  			src = subtractb(src, (off+3)/4)
  1839  			if frag := off & 3; frag != 0 {
  1840  				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
  1841  				src = add1(src)
  1842  				nbits += frag
  1843  				c -= frag
  1844  			}
  1845  			// Main loop: load the 4 pointer bits from one byte, write another byte.
  1846  			// The bits are rotating through the bit buffer.
  1847  			for i := c / 4; i > 0; i-- {
  1848  				bits |= (uintptr(*src) & 0xf) << nbits
  1849  				src = add1(src)
  1850  				*dst = uint8(bits&0xf | bitScanAll)
  1851  				dst = add1(dst)
  1852  				bits >>= 4
  1853  			}
  1854  			// Final src fragment.
  1855  			if c %= 4; c > 0 {
  1856  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1857  				nbits += c
  1858  			}
  1859  		}
  1860  	}
  1861  
  1862  	// Write any final bits out, using full-byte writes, even for the final byte.
  1863  	var totalBits uintptr
  1864  	if size == 1 {
  1865  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
  1866  		nbits += -nbits & 7
  1867  		for ; nbits > 0; nbits -= 8 {
  1868  			*dst = uint8(bits)
  1869  			dst = add1(dst)
  1870  			bits >>= 8
  1871  		}
  1872  	} else {
  1873  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
  1874  		nbits += -nbits & 3
  1875  		for ; nbits > 0; nbits -= 4 {
  1876  			v := bits&0xf | bitScanAll
  1877  			*dst = uint8(v)
  1878  			dst = add1(dst)
  1879  			bits >>= 4
  1880  		}
  1881  	}
  1882  	return totalBits
  1883  }
  1884  
  1885  func dumpGCProg(p *byte) {
  1886  	nptr := 0
  1887  	for {
  1888  		x := *p
  1889  		p = add1(p)
  1890  		if x == 0 {
  1891  			print("\t", nptr, " end\n")
  1892  			break
  1893  		}
  1894  		if x&0x80 == 0 {
  1895  			print("\t", nptr, " lit ", x, ":")
  1896  			n := int(x+7) / 8
  1897  			for i := 0; i < n; i++ {
  1898  				print(" ", hex(*p))
  1899  				p = add1(p)
  1900  			}
  1901  			print("\n")
  1902  			nptr += int(x)
  1903  		} else {
  1904  			nbit := int(x &^ 0x80)
  1905  			if nbit == 0 {
  1906  				for nb := uint(0); ; nb += 7 {
  1907  					x := *p
  1908  					p = add1(p)
  1909  					nbit |= int(x&0x7f) << nb
  1910  					if x&0x80 == 0 {
  1911  						break
  1912  					}
  1913  				}
  1914  			}
  1915  			count := 0
  1916  			for nb := uint(0); ; nb += 7 {
  1917  				x := *p
  1918  				p = add1(p)
  1919  				count |= int(x&0x7f) << nb
  1920  				if x&0x80 == 0 {
  1921  					break
  1922  				}
  1923  			}
  1924  			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
  1925  			nptr += nbit * count
  1926  		}
  1927  	}
  1928  }
  1929  
  1930  // Testing.
  1931  
  1932  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  1933  	target := (*stkframe)(ctxt)
  1934  	if frame.sp <= target.sp && target.sp < frame.varp {
  1935  		*target = *frame
  1936  		return false
  1937  	}
  1938  	return true
  1939  }
  1940  
  1941  // gcbits returns the GC type info for x, for testing.
  1942  // The result is the bitmap entries (0 or 1), one entry per byte.
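        // For example, for a value of type *struct{ p *byte; n uintptr; q *byte }
        // the result would be expected to be []byte{1, 0, 1}: non-pointer words
        // beyond typ.ptrdata are trimmed below.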
  1943  //go:linkname reflect_gcbits reflect.gcbits
  1944  func reflect_gcbits(x interface{}) []byte {
  1945  	ret := getgcmask(x)
  1946  	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
  1947  	nptr := typ.ptrdata / sys.PtrSize
  1948  	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
  1949  		ret = ret[:len(ret)-1]
  1950  	}
  1951  	return ret
  1952  }
  1953  
  1954  // Returns GC type info for object p for testing.
  1955  func getgcmask(ep interface{}) (mask []byte) {
  1956  	e := *efaceOf(&ep)
  1957  	p := e.data
  1958  	t := e._type
  1959  	// data or bss
  1960  	for _, datap := range activeModules() {
  1961  		// data
  1962  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  1963  			bitmap := datap.gcdatamask.bytedata
  1964  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1965  			mask = make([]byte, n/sys.PtrSize)
  1966  			for i := uintptr(0); i < n; i += sys.PtrSize {
  1967  				off := (uintptr(p) + i - datap.data) / sys.PtrSize
  1968  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1969  			}
  1970  			return
  1971  		}
  1972  
  1973  		// bss
  1974  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  1975  			bitmap := datap.gcbssmask.bytedata
  1976  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1977  			mask = make([]byte, n/sys.PtrSize)
  1978  			for i := uintptr(0); i < n; i += sys.PtrSize {
  1979  				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
  1980  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1981  			}
  1982  			return
  1983  		}
  1984  	}
  1985  
  1986  	// heap
  1987  	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
  1988  		hbits := heapBitsForAddr(base)
  1989  		n := s.elemsize
  1990  		mask = make([]byte, n/sys.PtrSize)
  1991  		for i := uintptr(0); i < n; i += sys.PtrSize {
  1992  			if hbits.isPointer() {
  1993  				mask[i/sys.PtrSize] = 1
  1994  			}
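        			// Skip the continuation check at the second word: there the high
        			// bit is the checkmark bit rather than the "more pointers" bit.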
  1995  			if i != 1*sys.PtrSize && !hbits.morePointers() {
  1996  				mask = mask[:i/sys.PtrSize]
  1997  				break
  1998  			}
  1999  			hbits = hbits.next()
  2000  		}
  2001  		return
  2002  	}
  2003  
  2004  	// stack
  2005  	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
  2006  		var frame stkframe
  2007  		frame.sp = uintptr(p)
  2008  		_g_ := getg()
  2009  		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  2010  		if frame.fn.valid() {
  2011  			locals, _ := getStackMap(&frame, nil, false)
  2012  			if locals.n == 0 {
  2013  				return
  2014  			}
  2015  			size := uintptr(locals.n) * sys.PtrSize
  2016  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2017  			mask = make([]byte, n/sys.PtrSize)
  2018  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2019  				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
  2020  				mask[i/sys.PtrSize] = locals.ptrbit(off)
  2021  			}
  2022  		}
  2023  		return
  2024  	}
  2025  
  2026  	// otherwise, not something the GC knows about.
  2027  	// possibly read-only data, like malloc(0).
  2028  	// must not have pointers
  2029  	return
  2030  }
  2031  
