Source file src/runtime/mbitmap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: type and heap bitmaps.
     6  //
     7  // Stack, data, and bss bitmaps
     8  //
     9  // Stack frames and global variables in the data and bss sections are described
    10  // by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
    11  // to be visited during GC. The bits in each byte are consumed starting with
    12  // the low bit: 1<<0, 1<<1, and so on.
    13  //
    14  // Heap bitmap
    15  //
    16  // The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
    17  // stored in the heapArena metadata backing each heap arena.
    18  // That is, if ha is the heapArena for the arena starting at start,
    19  // then ha.bitmap[0] holds the 2-bit entries for the four words start
    20  // through start+3*ptrSize, ha.bitmap[1] holds the entries for
    21  // start+4*ptrSize through start+7*ptrSize, and so on.
    22  //
    23  // In each 2-bit entry, the lower bit holds the same information as in the 1-bit
    24  // bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
    25  // The meaning of the high bit depends on the position of the word being described
    26  // in its allocated object. In all words *except* the second word, the
    27  // high bit indicates that the object is still being described. In
    28  // these words, if a bit pair with a high bit 0 is encountered, the
    29  // low bit can also be assumed to be 0, and the object description is
    30  // over. This 00 is called the ``dead'' encoding: it signals that the
    31  // rest of the words in the object are uninteresting to the garbage
    32  // collector.
    33  //
    34  // In the second word, the high bit is the GC ``checkmarked'' bit (see below).
    35  //
    36  // The 2-bit entries are split when written into the byte, so that the top half
    37  // of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
    38  // bits.
    39  // This form allows a copy from the 1-bit to the 4-bit form to keep the
    40  // pointer bits contiguous, instead of having to space them out.
    41  //
    42  // The code makes use of the fact that the zero value for a heap bitmap
    43  // has no live pointer bit set and is (depending on position) not used,
    44  // not checkmarked, and the dead encoding.
    45  // These properties must be preserved when modifying the encoding.
    46  //
    47  // The bitmap for noscan spans is not maintained. Code must ensure
    48  // that an object is scannable before consulting its bitmap by
    49  // checking either the noscan bit in the span or its type's
    50  // information.
    51  //
    52  // Checkmarks
    53  //
    54  // In a concurrent garbage collector, one worries about failing to mark
    55  // a live object due to mutations without write barriers or bugs in the
    56  // collector implementation. As a sanity check, the GC has a 'checkmark'
    57  // mode that retraverses the object graph with the world stopped, to make
    58  // sure that everything that should be marked is marked.
    59  // In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
    60  // for the second word of the object holds the checkmark bit.
    61  // When not in checkmark mode, this bit is set to 1.
    62  //
    63  // The smallest possible allocation is 8 bytes. On a 32-bit machine, that
    64  // means every allocated object has two words, so there is room for the
    65  // checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
    66  // just one word, so the second bit pair is not available for encoding the
    67  // checkmark. However, because non-pointer allocations are combined
    68  // into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
    69  // must be a pointer, so the type bit in the first word is not actually needed.
    70  // It is still used in general, except that in checkmark mode the type bit is
    71  // repurposed as the checkmark bit and then reinitialized (to 1) as the type
    72  // bit when checkmarking is finished.
    73  //
    74  
    75  package runtime
    76  
    77  import (
    78  	"runtime/internal/atomic"
    79  	"runtime/internal/sys"
    80  	"unsafe"
    81  )
    82  
    83  const (
    84  	bitPointer = 1 << 0
    85  	bitScan    = 1 << 4
    86  
    87  	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
    88  	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte
    89  
    90  	// all scan/pointer bits in a byte
    91  	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
    92  	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
    93  )
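
// Editorial aside (not part of mbitmap.go): a minimal sketch, using the
// constants above, of how one bitmap byte is decoded. Word i (0..3) of the
// four heap words covered by byte b keeps its pointer bit in the low nibble
// and its scan bit in the high nibble; decodeEntry is a hypothetical name.
//
//	func decodeEntry(b uint8, i uint) (isPointer, scan bool) {
//		isPointer = (b>>(i*heapBitsShift))&bitPointer != 0 // low 4 bits: pointer bits
//		scan = (b>>(i*heapBitsShift))&bitScan != 0         // high 4 bits: scan bits
//		return
//	}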
    94  
    95  // addb returns the byte pointer p+n.
    96  //go:nowritebarrier
    97  //go:nosplit
    98  func addb(p *byte, n uintptr) *byte {
    99  	// Note: wrote out full expression instead of calling add(p, n)
   100  	// to reduce the number of temporaries generated by the
   101  	// compiler for this trivial expression during inlining.
   102  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
   103  }
   104  
   105  // subtractb returns the byte pointer p-n.
   106  //go:nowritebarrier
   107  //go:nosplit
   108  func subtractb(p *byte, n uintptr) *byte {
   109  	// Note: wrote out full expression instead of calling add(p, -n)
   110  	// to reduce the number of temporaries generated by the
   111  	// compiler for this trivial expression during inlining.
   112  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
   113  }
   114  
   115  // add1 returns the byte pointer p+1.
   116  //go:nowritebarrier
   117  //go:nosplit
   118  func add1(p *byte) *byte {
   119  	// Note: wrote out full expression instead of calling addb(p, 1)
   120  	// to reduce the number of temporaries generated by the
   121  	// compiler for this trivial expression during inlining.
   122  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
   123  }
   124  
   125  // subtract1 returns the byte pointer p-1.
   126  //go:nowritebarrier
   127  //
   128  // nosplit because it is used during write barriers and must not be preempted.
   129  //go:nosplit
   130  func subtract1(p *byte) *byte {
   131  	// Note: wrote out full expression instead of calling subtractb(p, 1)
   132  	// to reduce the number of temporaries generated by the
   133  	// compiler for this trivial expression during inlining.
   134  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
   135  }
   136  
   137  // heapBits provides access to the bitmap bits for a single heap word.
   138  // The methods on heapBits take value receivers so that the compiler
   139  // can more easily inline calls to those methods and registerize the
   140  // struct fields independently.
   141  type heapBits struct {
   142  	bitp  *uint8
   143  	shift uint32
   144  	arena uint32 // Index of heap arena containing bitp
   145  	last  *uint8 // Last byte of the arena's bitmap
   146  }
   147  
   148  // Make the compiler check that heapBits.arena is large enough to hold
   149  // the maximum arena frame number.
   150  var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
   151  
   152  // markBits provides access to the mark bit for an object in the heap.
   153  // bytep points to the byte holding the mark bit.
   154  // mask is a byte with a single bit set that can be &ed with *bytep
   155  // to see if the bit has been set.
   156  // *m.bytep&m.mask != 0 indicates the mark bit is set.
   157  // index can be used along with span information to generate
   158  // the address of the object in the heap.
   159  // We maintain one set of mark bits for allocation and one for
   160  // marking purposes.
   161  type markBits struct {
   162  	bytep *uint8
   163  	mask  uint8
   164  	index uintptr
   165  }
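
// Editorial aside (not part of mbitmap.go): the bytep/mask pair above follows
// the usual one-bit-per-object layout. A hypothetical standalone helper that
// mirrors the arithmetic of (*gcBits).bitp:
//
//	func bitFor(bits []uint8, index uintptr) (bytep *uint8, mask uint8) {
//		return &bits[index/8], uint8(1) << (index % 8)
//	}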
   166  
   167  //go:nosplit
   168  func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
   169  	bytep, mask := s.allocBits.bitp(allocBitIndex)
   170  	return markBits{bytep, mask, allocBitIndex}
   171  }
   172  
   173  // refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
   174  // and negates them so that ctz (count trailing zeros) instructions
   175  // can be used. It then places these 8 bytes into the cached 64 bit
   176  // s.allocCache.
   177  func (s *mspan) refillAllocCache(whichByte uintptr) {
   178  	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
   179  	aCache := uint64(0)
   180  	aCache |= uint64(bytes[0])
   181  	aCache |= uint64(bytes[1]) << (1 * 8)
   182  	aCache |= uint64(bytes[2]) << (2 * 8)
   183  	aCache |= uint64(bytes[3]) << (3 * 8)
   184  	aCache |= uint64(bytes[4]) << (4 * 8)
   185  	aCache |= uint64(bytes[5]) << (5 * 8)
   186  	aCache |= uint64(bytes[6]) << (6 * 8)
   187  	aCache |= uint64(bytes[7]) << (7 * 8)
   188  	s.allocCache = ^aCache
   189  }
   190  
   191  // nextFreeIndex returns the index of the next free object in s at
   192  // or after s.freeindex.
   193  // There are hardware instructions that can be used to make this
   194  // faster if profiling warrants it.
   195  func (s *mspan) nextFreeIndex() uintptr {
   196  	sfreeindex := s.freeindex
   197  	snelems := s.nelems
   198  	if sfreeindex == snelems {
   199  		return sfreeindex
   200  	}
   201  	if sfreeindex > snelems {
   202  		throw("s.freeindex > s.nelems")
   203  	}
   204  
   205  	aCache := s.allocCache
   206  
   207  	bitIndex := sys.Ctz64(aCache)
   208  	for bitIndex == 64 {
   209  		// Move index to start of next cached bits.
   210  		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
   211  		if sfreeindex >= snelems {
   212  			s.freeindex = snelems
   213  			return snelems
   214  		}
   215  		whichByte := sfreeindex / 8
   216  		// Refill s.allocCache with the next 64 alloc bits.
   217  		s.refillAllocCache(whichByte)
   218  		aCache = s.allocCache
   219  		bitIndex = sys.Ctz64(aCache)
   220  		// nothing available in cached bits
   221  		// grab the next 8 bytes and try again.
   222  	}
   223  	result := sfreeindex + uintptr(bitIndex)
   224  	if result >= snelems {
   225  		s.freeindex = snelems
   226  		return snelems
   227  	}
   228  
   229  	s.allocCache >>= uint(bitIndex + 1)
   230  	sfreeindex = result + 1
   231  
   232  	if sfreeindex%64 == 0 && sfreeindex != snelems {
   233  		// We just incremented s.freeindex so it isn't 0.
   234  		// As each 1 in s.allocCache was encountered and used for allocation
   235  		// it was shifted away. At this point s.allocCache contains all 0s.
   236  		// Refill s.allocCache so that it corresponds
   237  		// to the bits at s.allocBits starting at s.freeindex.
   238  		whichByte := sfreeindex / 8
   239  		s.refillAllocCache(whichByte)
   240  	}
   241  	s.freeindex = sfreeindex
   242  	return result
   243  }
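
// Editorial aside (not part of mbitmap.go): a standalone sketch of the
// negate-then-count-trailing-zeros trick used by refillAllocCache and
// nextFreeIndex, written with math/bits instead of sys.Ctz64 (nextFree and
// the example values are hypothetical):
//
//	package main
//
//	import (
//		"fmt"
//		"math/bits"
//	)
//
//	// nextFree returns the index of the first free slot in a 64-object
//	// window of allocation bits, or -1 if the window is full.
//	func nextFree(allocBits uint64) int {
//		free := ^allocBits // invert so that 1 means "free"
//		if free == 0 {
//			return -1
//		}
//		return bits.TrailingZeros64(free)
//	}
//
//	func main() {
//		fmt.Println(nextFree(0xb)) // 0xb = binary 1011: slots 0, 1, 3 taken; prints 2
//	}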
   244  
   245  // isFree reports whether the index'th object in s is unallocated.
   246  func (s *mspan) isFree(index uintptr) bool {
   247  	if index < s.freeindex {
   248  		return false
   249  	}
   250  	bytep, mask := s.allocBits.bitp(index)
   251  	return *bytep&mask == 0
   252  }
   253  
   254  func (s *mspan) objIndex(p uintptr) uintptr {
   255  	byteOffset := p - s.base()
   256  	if byteOffset == 0 {
   257  		return 0
   258  	}
   259  	if s.baseMask != 0 {
   260  		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
   261  		return byteOffset >> s.divShift
   262  	}
   263  	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
   264  }
   265  
   266  func markBitsForAddr(p uintptr) markBits {
   267  	s := spanOf(p)
   268  	objIndex := s.objIndex(p)
   269  	return s.markBitsForIndex(objIndex)
   270  }
   271  
   272  func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
   273  	bytep, mask := s.gcmarkBits.bitp(objIndex)
   274  	return markBits{bytep, mask, objIndex}
   275  }
   276  
   277  func (s *mspan) markBitsForBase() markBits {
   278  	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
   279  }
   280  
   281  // isMarked reports whether mark bit m is set.
   282  func (m markBits) isMarked() bool {
   283  	return *m.bytep&m.mask != 0
   284  }
   285  
   286  // setMarked sets the marked bit in the markbits, atomically.
   287  func (m markBits) setMarked() {
   288  	// Might be racing with other updates, so use atomic update always.
   289  	// We used to be clever here and use a non-atomic update in certain
   290  	// cases, but it's not worth the risk.
   291  	atomic.Or8(m.bytep, m.mask)
   292  }
   293  
   294  // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
   295  func (m markBits) setMarkedNonAtomic() {
   296  	*m.bytep |= m.mask
   297  }
   298  
   299  // clearMarked clears the marked bit in the markbits, atomically.
   300  func (m markBits) clearMarked() {
   301  	// Might be racing with other updates, so use atomic update always.
   302  	// We used to be clever here and use a non-atomic update in certain
   303  	// cases, but it's not worth the risk.
   304  	atomic.And8(m.bytep, ^m.mask)
   305  }
   306  
   307  // markBitsForSpan returns the markBits for the span base address base.
   308  func markBitsForSpan(base uintptr) (mbits markBits) {
   309  	mbits = markBitsForAddr(base)
   310  	if mbits.mask != 1 {
   311  		throw("markBitsForSpan: unaligned start")
   312  	}
   313  	return mbits
   314  }
   315  
   316  // advance advances the markBits to the next object in the span.
   317  func (m *markBits) advance() {
   318  	if m.mask == 1<<7 {
   319  		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
   320  		m.mask = 1
   321  	} else {
   322  		m.mask = m.mask << 1
   323  	}
   324  	m.index++
   325  }
   326  
   327  // heapBitsForAddr returns the heapBits for the address addr.
   328  // The caller must ensure addr is in an allocated span.
   329  // In particular, be careful not to point past the end of an object.
   330  //
   331  // nosplit because it is used during write barriers and must not be preempted.
   332  //go:nosplit
   333  func heapBitsForAddr(addr uintptr) (h heapBits) {
   334  	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
   335  	arena := arenaIndex(addr)
   336  	ha := mheap_.arenas[arena.l1()][arena.l2()]
   337  	// The compiler uses a load for nil checking ha, but in this
   338  	// case we'll almost never hit that cache line again, so it
   339  	// makes more sense to do a value check.
   340  	if ha == nil {
   341  		// addr is not in the heap. Return nil heapBits, which
   342  		// we expect to crash in the caller.
   343  		return
   344  	}
   345  	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
   346  	h.shift = uint32((addr / sys.PtrSize) & 3)
   347  	h.arena = uint32(arena)
   348  	h.last = &ha.bitmap[len(ha.bitmap)-1]
   349  	return
   350  }
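
// Editorial aside (not part of mbitmap.go): on a 64-bit system
// (sys.PtrSize == 8) the indexing above is pure arithmetic on the address;
// each bitmap byte covers 4 words, i.e. 32 bytes of heap:
//
//	wordIndex := addr / 8                           // which pointer-sized word
//	byteIndex := (addr / 32) % heapArenaBitmapBytes // which bitmap byte within the arena
//	shift := uint32(wordIndex & 3)                  // which 2-bit entry within that byte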
   351  
   352  // findObject returns the base address for the heap object containing
   353  // the address p, the object's span, and the index of the object in s.
   354  // If p does not point into a heap object, it returns base == 0.
   355  //
   356  // If p is an invalid heap pointer and debug.invalidptr != 0,
   357  // findObject panics.
   358  //
   359  // refBase and refOff optionally give the base address of the object
   360  // in which the pointer p was found and the byte offset at which it
   361  // was found. These are used for error reporting.
   362  func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
   363  	s = spanOf(p)
   364  	// If p is a bad pointer, it may not be in s's bounds.
   365  	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
   366  		if s == nil || s.state == mSpanManual {
   367  			// If s is nil, the virtual address has never been part of the heap.
   368  			// This pointer may be to some mmap'd region, so we allow it.
   369  			// Pointers into stacks are also ok, the runtime manages these explicitly.
   370  			return
   371  		}
   372  
   373  		// The following ensures that we are rigorous about what data
   374  		// structures hold valid pointers.
   375  		if debug.invalidptr != 0 {
   376  			// Typically this indicates an incorrect use
   377  			// of unsafe or cgo to store a bad pointer in
   378  			// the Go heap. It may also indicate a runtime
   379  			// bug.
   380  			//
   381  			// TODO(austin): We could be more aggressive
   382  			// and detect pointers to unallocated objects
   383  			// in allocated spans.
   384  			printlock()
   385  			print("runtime: pointer ", hex(p))
   386  			if s.state != mSpanInUse {
   387  				print(" to unallocated span")
   388  			} else {
   389  				print(" to unused region of span")
   390  			}
   391  			print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
   392  			if refBase != 0 {
   393  				print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
   394  				gcDumpObject("object", refBase, refOff)
   395  			}
   396  			getg().m.traceback = 2
   397  			throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
   398  		}
   399  		return
   400  	}
   401  	// If this span holds objects of a power-of-2 size, just mask off the bits
   402  	// pointing to the interior of the object. Otherwise use the size to get the base.
   403  	if s.baseMask != 0 {
   404  		// optimize for power of 2 sized objects.
   405  		base = s.base()
   406  		base = base + (p-base)&uintptr(s.baseMask)
   407  		objIndex = (base - s.base()) >> s.divShift
   408  		// base = p & s.baseMask is faster for small spans,
   409  		// but doesn't work for large spans.
   410  		// Overall, it's faster to use the more general computation above.
   411  	} else {
   412  		base = s.base()
   413  		if p-base >= s.elemsize {
   414  			// n := (p - base) / s.elemsize, using division by multiplication
   415  			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
   416  			base += objIndex * s.elemsize
   417  		}
   418  	}
   419  	return
   420  }
   421  
   422  // next returns the heapBits describing the next pointer-sized word in memory.
   423  // That is, if h describes address p, h.next() describes p+ptrSize.
   424  // Note that next does not modify h. The caller must record the result.
   425  //
   426  // nosplit because it is used during write barriers and must not be preempted.
   427  //go:nosplit
   428  func (h heapBits) next() heapBits {
   429  	if h.shift < 3*heapBitsShift {
   430  		h.shift += heapBitsShift
   431  	} else if h.bitp != h.last {
   432  		h.bitp, h.shift = add1(h.bitp), 0
   433  	} else {
   434  		// Move to the next arena.
   435  		return h.nextArena()
   436  	}
   437  	return h
   438  }
   439  
   440  // nextArena advances h to the beginning of the next heap arena.
   441  //
   442  // This is a slow-path helper to next. gc's inliner knows that
   443  // heapBits.next can be inlined even though it calls this. This is
   444  // marked noinline so it doesn't get inlined into next and cause next
   445  // to be too big to inline.
   446  //
   447  //go:nosplit
   448  //go:noinline
   449  func (h heapBits) nextArena() heapBits {
   450  	h.arena++
   451  	ai := arenaIdx(h.arena)
   452  	l2 := mheap_.arenas[ai.l1()]
   453  	if l2 == nil {
   454  		// We just passed the end of the object, which
   455  		// was also the end of the heap. Poison h. It
   456  		// should never be dereferenced at this point.
   457  		return heapBits{}
   458  	}
   459  	ha := l2[ai.l2()]
   460  	if ha == nil {
   461  		return heapBits{}
   462  	}
   463  	h.bitp, h.shift = &ha.bitmap[0], 0
   464  	h.last = &ha.bitmap[len(ha.bitmap)-1]
   465  	return h
   466  }
   467  
   468  // forward returns the heapBits describing n pointer-sized words ahead of h in memory.
   469  // That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
   470  // h.forward(1) is equivalent to h.next(), just slower.
   471  // Note that forward does not modify h. The caller must record the result.
   473  //go:nosplit
   474  func (h heapBits) forward(n uintptr) heapBits {
   475  	n += uintptr(h.shift) / heapBitsShift
   476  	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
   477  	h.shift = uint32(n%4) * heapBitsShift
   478  	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
   479  		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
   480  		return h
   481  	}
   482  
   483  	// We're in a new heap arena.
   484  	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
   485  	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
   486  	ai := arenaIdx(h.arena)
   487  	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
   488  		a := l2[ai.l2()]
   489  		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
   490  		h.last = &a.bitmap[len(a.bitmap)-1]
   491  	} else {
   492  		h.bitp, h.last = nil, nil
   493  	}
   494  	return h
   495  }
   496  
   497  // forwardOrBoundary is like forward, but stops at boundaries between
   498  // contiguous sections of the bitmap. It returns the number of words
   499  // advanced over, which will be <= n.
   500  func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
   501  	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
   502  	if n > maxn {
   503  		n = maxn
   504  	}
   505  	return h.forward(n), n
   506  }
   507  
   508  // bits returns the heap bits for the current word. The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
   509  // The result includes in its higher bits the bits for subsequent words
   510  // described by the same bitmap byte.
   511  //
   512  // nosplit because it is used during write barriers and must not be preempted.
   513  //go:nosplit
   514  func (h heapBits) bits() uint32 {
   515  	// The (shift & 31) eliminates a test and conditional branch
   516  	// from the generated code.
   517  	return uint32(*h.bitp) >> (h.shift & 31)
   518  }
   519  
   520  // morePointers reports whether this word or any remaining word in this object
   521  // may contain pointers; a false result means the rest of the object is scalar data.
   522  // h must not describe the second word of the object.
   523  func (h heapBits) morePointers() bool {
   524  	return h.bits()&bitScan != 0
   525  }
   526  
   527  // isPointer reports whether the heap bits describe a pointer word.
   528  //
   529  // nosplit because it is used during write barriers and must not be preempted.
   530  //go:nosplit
   531  func (h heapBits) isPointer() bool {
   532  	return h.bits()&bitPointer != 0
   533  }
   534  
   535  // isCheckmarked reports whether the heap bits have the checkmarked bit set.
   536  // It must be told how large the object at h is, because the encoding of the
   537  // checkmark bit varies by size.
   538  // h must describe the initial word of the object.
   539  func (h heapBits) isCheckmarked(size uintptr) bool {
   540  	if size == sys.PtrSize {
   541  		return (*h.bitp>>h.shift)&bitPointer != 0
   542  	}
   543  	// All multiword objects are 2-word aligned,
   544  	// so we know that the initial word's 2-bit pair
   545  	// and the second word's 2-bit pair are in the
   546  	// same heap bitmap byte, *h.bitp.
   547  	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
   548  }
   549  
   550  // setCheckmarked sets the checkmarked bit.
   551  // It must be told how large the object at h is, because the encoding of the
   552  // checkmark bit varies by size.
   553  // h must describe the initial word of the object.
   554  func (h heapBits) setCheckmarked(size uintptr) {
   555  	if size == sys.PtrSize {
   556  		atomic.Or8(h.bitp, bitPointer<<h.shift)
   557  		return
   558  	}
   559  	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
   560  }
   561  
   562  // bulkBarrierPreWrite executes a write barrier
   563  // for every pointer slot in the memory range [src, src+size),
   564  // using pointer/scalar information from [dst, dst+size).
   565  // This executes the write barriers necessary before a memmove.
   566  // src, dst, and size must be pointer-aligned.
   567  // The range [dst, dst+size) must lie within a single object.
   568  // It does not perform the actual writes.
   569  //
   570  // As a special case, src == 0 indicates that this is being used for a
   571  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   572  // barrier.
   573  //
   574  // Callers should call bulkBarrierPreWrite immediately before
   575  // calling memmove(dst, src, size). This function is marked nosplit
   576  // to avoid being preempted; the GC must not stop the goroutine
   577  // between the memmove and the execution of the barriers.
   578  // The caller is also responsible for cgo pointer checks if this
   579  // may be writing Go pointers into non-Go memory.
   580  //
   581  // The pointer bitmap is not maintained for allocations containing
   582  // no pointers at all; any caller of bulkBarrierPreWrite must first
   583  // make sure the underlying allocation contains pointers, usually
   584  // by checking typ.ptrdata.
   585  //
   586  // Callers must perform cgo checks if writeBarrier.cgo.
   587  //
   588  //go:nosplit
   589  func bulkBarrierPreWrite(dst, src, size uintptr) {
   590  	if (dst|src|size)&(sys.PtrSize-1) != 0 {
   591  		throw("bulkBarrierPreWrite: unaligned arguments")
   592  	}
   593  	if !writeBarrier.needed {
   594  		return
   595  	}
   596  	if s := spanOf(dst); s == nil {
   597  		// If dst is a global, use the data or BSS bitmaps to
   598  		// execute write barriers.
   599  		for _, datap := range activeModules() {
   600  			if datap.data <= dst && dst < datap.edata {
   601  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   602  				return
   603  			}
   604  		}
   605  		for _, datap := range activeModules() {
   606  			if datap.bss <= dst && dst < datap.ebss {
   607  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   608  				return
   609  			}
   610  		}
   611  		return
   612  	} else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst {
   613  		// dst was heap memory at some point, but isn't now.
   614  		// It can't be a global. It must be either our stack,
   615  		// or in the case of direct channel sends, it could be
   616  		// another stack. Either way, no need for barriers.
   617  		// This will also catch if dst is in a freed span,
   618  		// though that should never happen.
   619  		return
   620  	}
   621  
   622  	buf := &getg().m.p.ptr().wbBuf
   623  	h := heapBitsForAddr(dst)
   624  	if src == 0 {
   625  		for i := uintptr(0); i < size; i += sys.PtrSize {
   626  			if h.isPointer() {
   627  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   628  				if !buf.putFast(*dstx, 0) {
   629  					wbBufFlush(nil, 0)
   630  				}
   631  			}
   632  			h = h.next()
   633  		}
   634  	} else {
   635  		for i := uintptr(0); i < size; i += sys.PtrSize {
   636  			if h.isPointer() {
   637  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   638  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   639  				if !buf.putFast(*dstx, *srcx) {
   640  					wbBufFlush(nil, 0)
   641  				}
   642  			}
   643  			h = h.next()
   644  		}
   645  	}
   646  }
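
// Editorial aside (not part of mbitmap.go): the typical calling pattern,
// abridged from the runtime's typedmemmove path, is to run the barriers for
// just the pointer-bearing prefix of the type and then do the move:
//
//	if typ.ptrdata != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	}
//	memmove(dst, src, typ.size)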
   647  
   648  // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
   649  // does not execute write barriers for [dst, dst+size).
   650  //
   651  // In addition to the requirements of bulkBarrierPreWrite
   652  // callers need to ensure [dst, dst+size) is zeroed.
   653  //
   654  // This is used for special cases where e.g. dst was just
   655  // created and zeroed with malloc.
   656  //go:nosplit
   657  func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
   658  	if (dst|src|size)&(sys.PtrSize-1) != 0 {
   659  		throw("bulkBarrierPreWrite: unaligned arguments")
   660  	}
   661  	if !writeBarrier.needed {
   662  		return
   663  	}
   664  	buf := &getg().m.p.ptr().wbBuf
   665  	h := heapBitsForAddr(dst)
   666  	for i := uintptr(0); i < size; i += sys.PtrSize {
   667  		if h.isPointer() {
   668  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   669  			if !buf.putFast(0, *srcx) {
   670  				wbBufFlush(nil, 0)
   671  			}
   672  		}
   673  		h = h.next()
   674  	}
   675  }
   676  
   677  // bulkBarrierBitmap executes write barriers for copying from [src,
   678  // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. dst is
   679  // assumed to start maskOffset bytes into the data covered by the
   680  // bitmap in bits (which may not be a multiple of 8).
   681  //
   682  // This is used by bulkBarrierPreWrite for writes to data and BSS.
   683  //
   684  //go:nosplit
   685  func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
   686  	word := maskOffset / sys.PtrSize
   687  	bits = addb(bits, word/8)
   688  	mask := uint8(1) << (word % 8)
   689  
   690  	buf := &getg().m.p.ptr().wbBuf
   691  	for i := uintptr(0); i < size; i += sys.PtrSize {
   692  		if mask == 0 {
   693  			bits = addb(bits, 1)
   694  			if *bits == 0 {
   695  				// Skip 8 words.
   696  				i += 7 * sys.PtrSize
   697  				continue
   698  			}
   699  			mask = 1
   700  		}
   701  		if *bits&mask != 0 {
   702  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   703  			if src == 0 {
   704  				if !buf.putFast(*dstx, 0) {
   705  					wbBufFlush(nil, 0)
   706  				}
   707  			} else {
   708  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   709  				if !buf.putFast(*dstx, *srcx) {
   710  					wbBufFlush(nil, 0)
   711  				}
   712  			}
   713  		}
   714  		mask <<= 1
   715  	}
   716  }
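
// Editorial aside (not part of mbitmap.go): in the 1-bit bitmaps used here,
// bit k of the mask simply records whether word k of the covered region is a
// pointer slot. A hypothetical standalone sketch of that walk, without the
// skip-8-words fast path above:
//
//	func pointerWords(bits []byte, nwords uintptr) []uintptr {
//		var ptrs []uintptr
//		for w := uintptr(0); w < nwords; w++ {
//			if bits[w/8]>>(w%8)&1 != 0 {
//				ptrs = append(ptrs, w)
//			}
//		}
//		return ptrs
//	}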
   717  
   718  // typeBitsBulkBarrier executes a write barrier for every
   719  // pointer that would be copied from [src, src+size) to [dst,
   720  // dst+size) by a memmove using the type bitmap to locate those
   721  // pointer slots.
   722  //
   723  // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
   724  // dst, src, and size must be pointer-aligned.
   725  // The type typ must have a plain bitmap, not a GC program.
   726  // The only use of this function is in channel sends, and the
   727  // 64 kB channel element limit takes care of this for us.
   728  //
   729  // Must not be preempted because it typically runs right before memmove,
   730  // and the GC must observe the barriers and the memmove as an atomic action.
   731  //
   732  // Callers must perform cgo checks if writeBarrier.cgo.
   733  //
   734  //go:nosplit
   735  func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
   736  	if typ == nil {
   737  		throw("runtime: typeBitsBulkBarrier without type")
   738  	}
   739  	if typ.size != size {
   740  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
   741  		throw("runtime: invalid typeBitsBulkBarrier")
   742  	}
   743  	if typ.kind&kindGCProg != 0 {
   744  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
   745  		throw("runtime: invalid typeBitsBulkBarrier")
   746  	}
   747  	if !writeBarrier.needed {
   748  		return
   749  	}
   750  	ptrmask := typ.gcdata
   751  	buf := &getg().m.p.ptr().wbBuf
   752  	var bits uint32
   753  	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
   754  		if i&(sys.PtrSize*8-1) == 0 {
   755  			bits = uint32(*ptrmask)
   756  			ptrmask = addb(ptrmask, 1)
   757  		} else {
   758  			bits = bits >> 1
   759  		}
   760  		if bits&1 != 0 {
   761  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   762  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   763  			if !buf.putFast(*dstx, *srcx) {
   764  				wbBufFlush(nil, 0)
   765  			}
   766  		}
   767  	}
   768  }
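
// Editorial aside (not part of mbitmap.go): the channel-send caller referred
// to above looks roughly like this, abridged from chan.go's sendDirect:
//
//	func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
//		dst := sg.elem
//		typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
//		memmove(dst, src, t.size)
//	}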
   769  
   770  // The methods operating on spans all require that h has been returned
   771  // by heapBitsForSpan and that size, n, total are the span layout description
   772  // returned by the mspan's layout method.
   773  // If total > size*n, it means that there is extra leftover memory in the span,
   774  // usually due to rounding.
   775  //
   776  // TODO(rsc): Perhaps introduce a different heapBitsSpan type.
   777  
   778  // initSpan initializes the heap bitmap for a span.
   779  // It clears all checkmark bits.
   780  // If this is a span of pointer-sized objects, it initializes all
   781  // words to pointer/scan.
   782  // Otherwise, it initializes all words to scalar/dead.
   783  func (h heapBits) initSpan(s *mspan) {
   784  	size, n, total := s.layout()
   785  
   786  	// Init the markbit structures
   787  	s.freeindex = 0
   788  	s.allocCache = ^uint64(0) // all 1s indicating all free.
   789  	s.nelems = n
   790  	s.allocBits = nil
   791  	s.gcmarkBits = nil
   792  	s.gcmarkBits = newMarkBits(s.nelems)
   793  	s.allocBits = newAllocBits(s.nelems)
   794  
   795  	// Clear bits corresponding to objects.
   796  	nw := total / sys.PtrSize
   797  	if nw%wordsPerBitmapByte != 0 {
   798  		throw("initSpan: unaligned length")
   799  	}
   800  	if h.shift != 0 {
   801  		throw("initSpan: unaligned base")
   802  	}
   803  	for nw > 0 {
   804  		hNext, anw := h.forwardOrBoundary(nw)
   805  		nbyte := anw / wordsPerBitmapByte
   806  		if sys.PtrSize == 8 && size == sys.PtrSize {
   807  			bitp := h.bitp
   808  			for i := uintptr(0); i < nbyte; i++ {
   809  				*bitp = bitPointerAll | bitScanAll
   810  				bitp = add1(bitp)
   811  			}
   812  		} else {
   813  			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
   814  		}
   815  		h = hNext
   816  		nw -= anw
   817  	}
   818  }
   819  
   820  // initCheckmarkSpan initializes a span for being checkmarked.
   821  // It clears the checkmark bits, which are set to 1 in normal operation.
   822  func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
   823  	// sys.PtrSize == 8 is a compile-time constant, false on 32-bit, so the compiler eliminates this code entirely there.
   824  	if sys.PtrSize == 8 && size == sys.PtrSize {
   825  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   826  		// Only possible on 64-bit system, since minimum size is 8.
   827  		// Must clear type bit (checkmark bit) of every word.
   828  		// The type bit is the lower of every two-bit pair.
   829  		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
   830  			*h.bitp &^= bitPointerAll
   831  			h = h.forward(wordsPerBitmapByte)
   832  		}
   833  		return
   834  	}
   835  	for i := uintptr(0); i < n; i++ {
   836  		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
   837  		h = h.forward(size / sys.PtrSize)
   838  	}
   839  }
   840  
   841  // clearCheckmarkSpan undoes all the checkmarking in a span.
   842  // The actual checkmark bits are ignored, so the only work to do
   843  // is to fix the pointer bits. (Pointer bits are ignored by scanobject
   844  // but consulted by typedmemmove.)
   845  func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
   846  	// sys.PtrSize == 8 is a compile-time constant, false on 32-bit, so the compiler eliminates this code entirely there.
   847  	if sys.PtrSize == 8 && size == sys.PtrSize {
   848  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   849  		// Only possible on 64-bit system, since minimum size is 8.
   850  		// Must clear type bit (checkmark bit) of every word.
   851  		// The type bit is the lower of every two-bit pair.
   852  		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
   853  			*h.bitp |= bitPointerAll
   854  			h = h.forward(wordsPerBitmapByte)
   855  		}
   856  	}
   857  }
   858  
   859  // oneBitCount is indexed by byte and produces the
   860  // number of 1 bits in that byte. For example, 128 has 1 bit set
   861  // and oneBitCount[128] holds 1.
   862  var oneBitCount = [256]uint8{
   863  	0, 1, 1, 2, 1, 2, 2, 3,
   864  	1, 2, 2, 3, 2, 3, 3, 4,
   865  	1, 2, 2, 3, 2, 3, 3, 4,
   866  	2, 3, 3, 4, 3, 4, 4, 5,
   867  	1, 2, 2, 3, 2, 3, 3, 4,
   868  	2, 3, 3, 4, 3, 4, 4, 5,
   869  	2, 3, 3, 4, 3, 4, 4, 5,
   870  	3, 4, 4, 5, 4, 5, 5, 6,
   871  	1, 2, 2, 3, 2, 3, 3, 4,
   872  	2, 3, 3, 4, 3, 4, 4, 5,
   873  	2, 3, 3, 4, 3, 4, 4, 5,
   874  	3, 4, 4, 5, 4, 5, 5, 6,
   875  	2, 3, 3, 4, 3, 4, 4, 5,
   876  	3, 4, 4, 5, 4, 5, 5, 6,
   877  	3, 4, 4, 5, 4, 5, 5, 6,
   878  	4, 5, 5, 6, 5, 6, 6, 7,
   879  	1, 2, 2, 3, 2, 3, 3, 4,
   880  	2, 3, 3, 4, 3, 4, 4, 5,
   881  	2, 3, 3, 4, 3, 4, 4, 5,
   882  	3, 4, 4, 5, 4, 5, 5, 6,
   883  	2, 3, 3, 4, 3, 4, 4, 5,
   884  	3, 4, 4, 5, 4, 5, 5, 6,
   885  	3, 4, 4, 5, 4, 5, 5, 6,
   886  	4, 5, 5, 6, 5, 6, 6, 7,
   887  	2, 3, 3, 4, 3, 4, 4, 5,
   888  	3, 4, 4, 5, 4, 5, 5, 6,
   889  	3, 4, 4, 5, 4, 5, 5, 6,
   890  	4, 5, 5, 6, 5, 6, 6, 7,
   891  	3, 4, 4, 5, 4, 5, 5, 6,
   892  	4, 5, 5, 6, 5, 6, 6, 7,
   893  	4, 5, 5, 6, 5, 6, 6, 7,
   894  	5, 6, 6, 7, 6, 7, 7, 8}
   895  
   896  // countAlloc returns the number of objects allocated in span s by
   897  // scanning the allocation bitmap.
   898  // TODO:(rlh) Use popcount intrinsic.
   899  func (s *mspan) countAlloc() int {
   900  	count := 0
   901  	maxIndex := s.nelems / 8
   902  	for i := uintptr(0); i < maxIndex; i++ {
   903  		mrkBits := *s.gcmarkBits.bytep(i)
   904  		count += int(oneBitCount[mrkBits])
   905  	}
   906  	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
   907  		mrkBits := *s.gcmarkBits.bytep(maxIndex)
   908  		mask := uint8((1 << bitsInLastByte) - 1)
   909  		bits := mrkBits & mask
   910  		count += int(oneBitCount[bits])
   911  	}
   912  	return count
   913  }
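
// Editorial aside (not part of mbitmap.go): the popcount rewrite suggested by
// the TODO above could look roughly like the following, using math/bits
// (which the runtime itself cannot import; shown only to illustrate intent):
//
//	count := 0
//	for i := uintptr(0); i < s.nelems/8; i++ {
//		count += bits.OnesCount8(*s.gcmarkBits.bytep(i))
//	}
//	if rem := s.nelems % 8; rem != 0 {
//		count += bits.OnesCount8(*s.gcmarkBits.bytep(s.nelems/8) & (1<<rem - 1))
//	}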
   914  
   915  // heapBitsSetType records that the new allocation [x, x+size)
   916  // holds in [x, x+dataSize) one or more values of type typ.
   917  // (The number of values is given by dataSize / typ.size.)
   918  // If dataSize < size, the fragment [x+dataSize, x+size) is
   919  // recorded as non-pointer data.
   920  // It is known that the type has pointers somewhere;
   921  // malloc does not call heapBitsSetType when there are no pointers,
   922  // because all free objects are marked as noscan during
   923  // heapBitsSweepSpan.
   924  //
   925  // There can only be one allocation from a given span active at a time,
   926  // and the bitmap for a span always falls on byte boundaries,
   927  // so there are no write-write races for access to the heap bitmap.
   928  // Hence, heapBitsSetType can access the bitmap without atomics.
   929  //
   930  // There can be read-write races between heapBitsSetType and things
   931  // that read the heap bitmap like scanobject. However, since
   932  // heapBitsSetType is only used for objects that have not yet been
   933  // made reachable, readers will ignore bits being modified by this
   934  // function. This does mean this function cannot transiently modify
   935  // bits that belong to neighboring objects. Also, on weakly-ordered
   936  // machines, callers must execute a store/store (publication) barrier
   937  // between calling this function and making the object reachable.
   938  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   939  	const doubleCheck = false // slow but helpful; enable to test modifications to this code
   940  
   941  	// dataSize is always size rounded up to the next malloc size class,
   942  	// except in the case of allocating a defer block, in which case
   943  	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
   944  	// arbitrarily larger.
   945  	//
   946  	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
   947  	// assume that dataSize == size without checking it explicitly.
   948  
   949  	if sys.PtrSize == 8 && size == sys.PtrSize {
   950  		// It's one word and it has pointers, so it must be a pointer.
   951  		// Since all allocated one-word objects are pointers
   952  		// (non-pointers are aggregated into tinySize allocations),
   953  		// initSpan sets the pointer bits for us. Nothing to do here.
   954  		if doubleCheck {
   955  			h := heapBitsForAddr(x)
   956  			if !h.isPointer() {
   957  				throw("heapBitsSetType: pointer bit missing")
   958  			}
   959  			if !h.morePointers() {
   960  				throw("heapBitsSetType: scan bit missing")
   961  			}
   962  		}
   963  		return
   964  	}
   965  
   966  	h := heapBitsForAddr(x)
   967  	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
   968  
   969  	// Heap bitmap bits for a 2-word object are only 4 bits,
   970  	// so they are shared with the objects next to it.
   971  	// This is called out as a special case primarily for 32-bit systems,
   972  	// so that on 32-bit systems the code below can assume all objects
   973  	// are 4-word aligned (because they're all 16-byte aligned).
   974  	if size == 2*sys.PtrSize {
   975  		if typ.size == sys.PtrSize {
   976  			// We're allocating a block big enough to hold two pointers.
   977  			// On 64-bit, that means the actual object must be two pointers,
   978  			// or else we'd have used the one-pointer-sized block.
   979  			// On 32-bit, however, this is the 8-byte block, the smallest one.
   980  			// So it could be that we're allocating one pointer and this was
   981  			// just the smallest block available. Distinguish by checking dataSize.
   982  			// (In general the number of instances of typ being allocated is
   983  			// dataSize/typ.size.)
   984  			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
   985  				// 1 pointer object. On 32-bit machines clear the bit for the
   986  				// unused second word.
   987  				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
   988  				*h.bitp |= (bitPointer | bitScan) << h.shift
   989  			} else {
   990  				// 2-element slice of pointer.
   991  				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
   992  			}
   993  			return
   994  		}
   995  		// Otherwise typ.size must be 2*sys.PtrSize,
   996  		// and typ.kind&kindGCProg == 0.
   997  		if doubleCheck {
   998  			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
   999  				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
  1000  				throw("heapBitsSetType")
  1001  			}
  1002  		}
  1003  		b := uint32(*ptrmask)
  1004  		hb := (b & 3) | bitScan
  1005  		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
  1006  		// 110011 is shifted left by h.shift and complemented.
  1007  		// This clears out the bits that are about to be
  1008  		// ored into *h.bitp in the next instructions.
  1009  		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
  1010  		*h.bitp |= uint8(hb << h.shift)
  1011  		return
  1012  	}
  1013  
  1014  	// Copy from 1-bit ptrmask into 2-bit bitmap.
  1015  	// The basic approach is to use a single uintptr as a bit buffer,
  1016  	// alternating between reloading the buffer and writing bitmap bytes.
  1017  	// In general, one load can supply two bitmap byte writes.
  1018  	// This is a lot of lines of code, but it compiles into relatively few
  1019  	// machine instructions.
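	// Editorial illustration (not from the original source): a ptrmask byte
	// 0x0b (binary 00001011) says that words 0, 1, and 3 of the type hold
	// pointers. In the Phase 2 loop below, each group of four 1-bit entries
	// becomes one heap bitmap byte: the low nibble takes the 4 pointer bits
	// and the high nibble takes 4 scan bits, so this group is written as
	// bitScanAll|0x0b = 0xfb. (Phase 1 treats the first byte specially,
	// because the second word's high bit is the checkmark.)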
  1020  
  1021  	outOfPlace := false
  1022  	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
  1023  		// This object spans heap arenas, so the bitmap may be
  1024  		// discontiguous. Unroll it into the object instead
  1025  		// and then copy it out.
  1026  		//
  1027  		// In doubleCheck mode, we randomly do this anyway to
  1028  		// stress test the bitmap copying path.
  1029  		outOfPlace = true
  1030  		h.bitp = (*uint8)(unsafe.Pointer(x))
  1031  		h.last = nil
  1032  	}
  1033  
  1034  	var (
  1035  		// Ptrmask input.
  1036  		p     *byte   // last ptrmask byte read
  1037  		b     uintptr // ptrmask bits already loaded
  1038  		nb    uintptr // number of bits in b at next read
  1039  		endp  *byte   // final ptrmask byte to read (then repeat)
  1040  		endnb uintptr // number of valid bits in *endp
  1041  		pbits uintptr // alternate source of bits
  1042  
  1043  		// Heap bitmap output.
  1044  		w     uintptr // words processed
  1045  		nw    uintptr // number of words to process
  1046  		hbitp *byte   // next heap bitmap byte to write
  1047  		hb    uintptr // bits being prepared for *hbitp
  1048  	)
  1049  
  1050  	hbitp = h.bitp
  1051  
  1052  	// Handle GC program. Delayed until this part of the code
  1053  	// so that we can use the same double-checking mechanism
  1054  	// as the 1-bit case. Nothing above could have encountered
  1055  	// GC programs: the cases were all too small.
  1056  	if typ.kind&kindGCProg != 0 {
  1057  		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
  1058  		if doubleCheck {
  1059  			// Double-check the heap bits written by GC program
  1060  			// by running the GC program to create a 1-bit pointer mask
  1061  			// and then jumping to the double-check code below.
  1062  			// This doesn't catch bugs shared between the 1-bit and 4-bit
  1063  			// GC program execution, but it does catch mistakes specific
  1064  			// to just one of those and bugs in heapBitsSetTypeGCProg's
  1065  			// implementation of arrays.
  1066  			lock(&debugPtrmask.lock)
  1067  			if debugPtrmask.data == nil {
  1068  				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
  1069  			}
  1070  			ptrmask = debugPtrmask.data
  1071  			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
  1072  		}
  1073  		goto Phase4
  1074  	}
  1075  
  1076  	// Note about sizes:
  1077  	//
  1078  	// typ.size is the number of words in the object,
  1079  	// and typ.ptrdata is the number of words in the prefix
  1080  	// of the object that contains pointers. That is, the final
  1081  	// typ.size - typ.ptrdata words contain no pointers.
  1082  	// This allows optimization of a common pattern where
  1083  	// an object has a small header followed by a large scalar
  1084  	// buffer. If we know the pointers are over, we don't have
  1085  	// to scan the buffer's heap bitmap at all.
  1086  	// The 1-bit ptrmasks are sized to contain only bits for
  1087  	// the typ.ptrdata prefix, zero padded out to a full byte
  1088  	// of bitmap. This code sets nw (below) so that heap bitmap
  1089  	// bits are only written for the typ.ptrdata prefix; if there is
  1090  	// more room in the allocated object, the next heap bitmap
  1091  	// entry is a 00, indicating that there are no more pointers
  1092  	// to scan. So only the ptrmask for the ptrdata bytes is needed.
  1093  	//
  1094  	// Replicated copies are not as nice: if there is an array of
  1095  	// objects with scalar tails, all but the last tail do have to
  1096  	// be initialized, because there is no way to say "skip forward".
  1097  	// However, because of the possibility of a repeated type with
  1098  	// size not a multiple of 4 pointers (one heap bitmap byte),
  1099  	// the code already must handle the last ptrmask byte specially
  1100  	// by treating it as containing only the bits for endnb pointers,
  1101  	// where endnb <= 4. We represent large scalar tails that must
  1102  	// be expanded in the replication by setting endnb larger than 4.
  1103  	// This will have the effect of reading many bits out of b,
  1104  	// but once the real bits are shifted out, b will supply as many
  1105  	// zero bits as we try to read, which is exactly what we need.
  1106  
  1107  	p = ptrmask
  1108  	if typ.size < dataSize {
  1109  		// Filling in bits for an array of typ.
  1110  		// Set up for repetition of ptrmask during main loop.
  1111  		// Note that ptrmask describes only the typ.ptrdata prefix of the type; the tail is all zero bits.
  1112  		const maxBits = sys.PtrSize*8 - 7
  1113  		if typ.ptrdata/sys.PtrSize <= maxBits {
  1114  			// Entire ptrmask fits in uintptr with room for a byte fragment.
  1115  			// Load into pbits and never read from ptrmask again.
  1116  			// This is especially important when the ptrmask has
  1117  			// fewer than 8 bits in it; otherwise the reload in the middle
  1118  			// of the Phase 2 loop would itself need to loop to gather
  1119  			// at least 8 bits.
  1120  
  1121  			// Accumulate ptrmask into b.
  1122  			// ptrmask is sized to describe only typ.ptrdata, but we record
  1123  			// it as describing typ.size bytes, since all the high bits are zero.
  1124  			nb = typ.ptrdata / sys.PtrSize
  1125  			for i := uintptr(0); i < nb; i += 8 {
  1126  				b |= uintptr(*p) << i
  1127  				p = add1(p)
  1128  			}
  1129  			nb = typ.size / sys.PtrSize
  1130  
  1131  			// Replicate ptrmask to fill entire pbits uintptr.
  1132  			// Doubling and truncating is fewer steps than
  1133  			// iterating by nb each time. (nb could be 1.)
  1134  			// Since we loaded typ.ptrdata/sys.PtrSize bits
  1135  			// but are pretending to have typ.size/sys.PtrSize,
  1136  			// there might be no replication necessary/possible.
  1137  			pbits = b
  1138  			endnb = nb
  1139  			if nb+nb <= maxBits {
  1140  				for endnb <= sys.PtrSize*8 {
  1141  					pbits |= pbits << endnb
  1142  					endnb += endnb
  1143  				}
  1144  				// Truncate to a multiple of original ptrmask.
  1145  				// Because nb+nb <= maxBits, nb fits in a byte.
  1146  				// Byte division is cheaper than uintptr division.
  1147  				endnb = uintptr(maxBits/byte(nb)) * nb
  1148  				pbits &= 1<<endnb - 1
  1149  				b = pbits
  1150  				nb = endnb
  1151  			}
  1152  
  1153  			// Clear p and endp as sentinel for using pbits.
  1154  			// Checked during Phase 2 loop.
  1155  			p = nil
  1156  			endp = nil
  1157  		} else {
  1158  			// Ptrmask is larger. Read it multiple times.
  1159  			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
  1160  			endp = addb(ptrmask, n)
  1161  			endnb = typ.size/sys.PtrSize - n*8
  1162  		}
  1163  	}
  1164  	if p != nil {
  1165  		b = uintptr(*p)
  1166  		p = add1(p)
  1167  		nb = 8
  1168  	}
  1169  
  1170  	if typ.size == dataSize {
  1171  		// Single entry: can stop once we reach the non-pointer data.
  1172  		nw = typ.ptrdata / sys.PtrSize
  1173  	} else {
  1174  		// Repeated instances of typ in an array.
  1175  		// Have to process first N-1 entries in full, but can stop
  1176  		// once we reach the non-pointer data in the final entry.
  1177  		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1178  	}
  1179  	if nw == 0 {
  1180  		// No pointers! Caller was supposed to check.
  1181  		println("runtime: invalid type ", typ.string())
  1182  		throw("heapBitsSetType: called with non-pointer type")
  1183  		return
  1184  	}
  1185  	if nw < 2 {
  1186  		// Must write at least 2 words, because the "no scan"
  1187  		// encoding doesn't take effect until the third word.
  1188  		nw = 2
  1189  	}
  1190  
  1191  	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
  1192  	// The leading byte is special because it contains the bits for word 1,
  1193  	// which does not have the scan bit set.
  1194  	// The leading half-byte is special because it's half a byte,
  1195  	// so we have to be careful with the bits already there.
  1196  	switch {
  1197  	default:
  1198  		throw("heapBitsSetType: unexpected shift")
  1199  
  1200  	case h.shift == 0:
  1201  		// Ptrmask and heap bitmap are aligned.
  1202  		// Handle first byte of bitmap specially.
  1203  		//
  1204  		// The first byte we write out covers the first four
  1205  		// words of the object. The scan/dead bit on the first
  1206  		// word must be set to scan since there are pointers
  1207  		// somewhere in the object. The scan/dead bit on the
  1208  		// second word is the checkmark, so we don't set it.
  1209  		// In all following words, we set the scan/dead bit
  1210  		// appropriately to indicate that the object continues
  1211  		// to the next 2-bit entry in the bitmap.
  1212  		//
  1213  		// TODO: It doesn't matter if we set the checkmark, so
  1214  		// maybe this case isn't needed any more.
  1215  		hb = b & bitPointerAll
  1216  		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
  1217  		if w += 4; w >= nw {
  1218  			goto Phase3
  1219  		}
  1220  		*hbitp = uint8(hb)
  1221  		hbitp = add1(hbitp)
  1222  		b >>= 4
  1223  		nb -= 4
  1224  
  1225  	case sys.PtrSize == 8 && h.shift == 2:
  1226  		// Ptrmask and heap bitmap are misaligned.
  1227  		// The bits for the first two words are in a byte shared
  1228  		// with another object, so we must be careful with the bits
  1229  		// already there.
  1230  		// We took care of 1-word and 2-word objects above,
  1231  		// so this is at least a 6-word object.
  1232  		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
  1233  		// This is not noscan, so set the scan bit in the
  1234  		// first word.
  1235  		hb |= bitScan << (2 * heapBitsShift)
  1236  		b >>= 2
  1237  		nb -= 2
  1238  		// Note: no bitScan for second word because that's
  1239  		// the checkmark.
  1240  		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
  1241  		*hbitp |= uint8(hb)
  1242  		hbitp = add1(hbitp)
  1243  		if w += 2; w >= nw {
  1244  			// We know that there is more data, because we handled 2-word objects above.
  1245  			// This must be at least a 6-word object. If we're out of pointer words,
  1246  			// mark no scan in next bitmap byte and finish.
  1247  			hb = 0
  1248  			w += 4
  1249  			goto Phase3
  1250  		}
  1251  	}
  1252  
  1253  	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
  1254  	// The loop computes the bits for that last write but does not execute the write;
  1255  	// it leaves the bits in hb for processing by phase 3.
  1256  	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
  1257  	// use in the first half of the loop right now, and then we only adjust nb explicitly
  1258  	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
  1259  	nb -= 4
  1260  	for {
  1261  		// Emit bitmap byte.
  1262  		// b has at least nb+4 bits, with one exception:
  1263  		// if w+4 >= nw, then b has only nw-w bits,
  1264  		// but we'll stop at the break and then truncate
  1265  		// appropriately in Phase 3.
  1266  		hb = b & bitPointerAll
  1267  		hb |= bitScanAll
  1268  		if w += 4; w >= nw {
  1269  			break
  1270  		}
  1271  		*hbitp = uint8(hb)
  1272  		hbitp = add1(hbitp)
  1273  		b >>= 4
  1274  
  1275  		// Load more bits. b has nb right now.
  1276  		if p != endp {
  1277  			// Fast path: keep reading from ptrmask.
  1278  			// nb unmodified: we just loaded 8 bits,
  1279  			// and the next iteration will consume 8 bits,
  1280  			// leaving us with the same nb the next time we're here.
  1281  			if nb < 8 {
  1282  				b |= uintptr(*p) << nb
  1283  				p = add1(p)
  1284  			} else {
  1285  				// Reduce the number of bits in b.
  1286  				// This is important if we skipped
  1287  				// over a scalar tail, since nb could
  1288  				// be larger than the bit width of b.
  1289  				nb -= 8
  1290  			}
  1291  		} else if p == nil {
  1292  			// Almost as fast path: track bit count and refill from pbits.
  1293  			// For short repetitions.
  1294  			if nb < 8 {
  1295  				b |= pbits << nb
  1296  				nb += endnb
  1297  			}
  1298  			nb -= 8 // for next iteration
  1299  		} else {
  1300  			// Slow path: reached end of ptrmask.
  1301  			// Process final partial byte and rewind to start.
  1302  			b |= uintptr(*p) << nb
  1303  			nb += endnb
  1304  			if nb < 8 {
  1305  				b |= uintptr(*ptrmask) << nb
  1306  				p = add1(ptrmask)
  1307  			} else {
  1308  				nb -= 8
  1309  				p = ptrmask
  1310  			}
  1311  		}
  1312  
  1313  		// Emit bitmap byte.
  1314  		hb = b & bitPointerAll
  1315  		hb |= bitScanAll
  1316  		if w += 4; w >= nw {
  1317  			break
  1318  		}
  1319  		*hbitp = uint8(hb)
  1320  		hbitp = add1(hbitp)
  1321  		b >>= 4
  1322  	}
  1323  
  1324  Phase3:
  1325  	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
  1326  	if w > nw {
  1327  		// Counting the 4 entries in hb not yet written to memory,
  1328  		// there are more entries than possible pointer slots.
  1329  		// Discard the excess entries (can't be more than 3).
  1330  		mask := uintptr(1)<<(4-(w-nw)) - 1
  1331  		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
  1332  	}
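	// For example, if w-nw == 1, one excess entry sits in hb: mask is
	// 1<<3 - 1 = 0b0111, so hb &= 0b01110111, which clears the extra entry's
	// pointer bit (low nibble) and scan bit (high nibble) while keeping the
	// three valid entries.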
  1333  
  1334  	// Change nw from counting possibly-pointer words to total words in allocation.
  1335  	nw = size / sys.PtrSize
  1336  
  1337  	// Write whole bitmap bytes.
  1338  	// The first is hb, the rest are zero.
  1339  	if w <= nw {
  1340  		*hbitp = uint8(hb)
  1341  		hbitp = add1(hbitp)
  1342  		hb = 0 // for possible final half-byte below
  1343  		for w += 4; w <= nw; w += 4 {
  1344  			*hbitp = 0
  1345  			hbitp = add1(hbitp)
  1346  		}
  1347  	}
  1348  
  1349  	// Write final partial bitmap byte if any.
  1350  	// We know w > nw, or else we'd still be in the loop above.
  1351  	// It can be bigger only due to the 4 entries in hb that it counts.
  1352  	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
  1353  	// and can discard the 4 sitting in hb.
  1354  	// But if w == nw+2, we need to write the first two entries in hb.
  1355  	// The byte is shared with the next object, so be careful with
  1356  	// existing bits.
  1357  	if w == nw+2 {
  1358  		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
  1359  	}
  1360  
  1361  Phase4:
  1362  	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
  1363  	if outOfPlace {
  1364  		// TODO: We could probably make this faster by
  1365  		// handling [x+dataSize, x+size) specially.
  1366  		h := heapBitsForAddr(x)
  1367  		// cnw is the number of heap words, or bit pairs
  1368  		// remaining (like nw above).
  1369  		cnw := size / sys.PtrSize
  1370  		src := (*uint8)(unsafe.Pointer(x))
  1371  		// We know the first and last byte of the bitmap are
  1372  		// not the same, but it's still possible for small
  1373  		// objects to span arenas, so bitmap bytes may be shared
  1374  		// with neighboring objects.
  1375  		//
  1376  		// Handle the first byte specially if it's shared. See
  1377  		// Phase 1 for why this is the only special case we need.
  1378  		if doubleCheck {
  1379  			if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
  1380  				print("x=", x, " size=", size, " h.shift=", h.shift, "\n")
  1381  				throw("bad start shift")
  1382  			}
  1383  		}
  1384  		if sys.PtrSize == 8 && h.shift == 2 {
  1385  			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
  1386  			h = h.next().next()
  1387  			cnw -= 2
  1388  			src = addb(src, 1)
  1389  		}
  1390  		// We're now byte aligned. Copy out to per-arena
  1391  		// bitmaps until the last byte (which may again be
  1392  		// partial).
  1393  		for cnw >= 4 {
  1394  			// This loop processes four words at a time,
  1395  			// so round cnw down accordingly.
  1396  			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)
  1397  
  1398  			// n is the number of bitmap bytes to copy.
  1399  			n := words / 4
  1400  			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
  1401  			cnw -= words
  1402  			h = hNext
  1403  			src = addb(src, n)
  1404  		}
  1405  		if doubleCheck && h.shift != 0 {
  1406  			print("cnw=", cnw, " h.shift=", h.shift, "\n")
  1407  			throw("bad shift after block copy")
  1408  		}
  1409  		// Handle the last byte if it's shared.
  1410  		if cnw == 2 {
  1411  			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
  1412  			src = addb(src, 1)
  1413  			h = h.next().next()
  1414  		}
  1415  		if doubleCheck {
  1416  			if uintptr(unsafe.Pointer(src)) > x+size {
  1417  				throw("copy exceeded object size")
  1418  			}
  1419  			if !(cnw == 0 || cnw == 2) {
  1420  				print("x=", x, " size=", size, " cnw=", cnw, "\n")
  1421  				throw("bad number of remaining words")
  1422  			}
  1423  			// Set up hbitp so doubleCheck code below can check it.
  1424  			hbitp = h.bitp
  1425  		}
  1426  		// Zero the object where we wrote the bitmap.
  1427  		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
  1428  	}
  1429  
  1430  	// Double check the whole bitmap.
  1431  	if doubleCheck {
  1432  		// x+size may not point to the heap, so back up one
  1433  		// word and then call next().
  1434  		end := heapBitsForAddr(x + size - sys.PtrSize).next()
  1435  		endAI := arenaIdx(end.arena)
  1436  		if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
  1437  			// The unrolling code above walks hbitp just
  1438  			// past the bitmap without moving to the next
  1439  			// arena. Synthesize this for end.bitp.
  1440  			end.arena--
  1441  			endAI = arenaIdx(end.arena)
  1442  			end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
  1443  			end.last = nil
  1444  		}
  1445  		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
  1446  			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
  1447  			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1448  			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1449  			h0 := heapBitsForAddr(x)
  1450  			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1451  			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
  1452  			throw("bad heapBitsSetType")
  1453  		}
  1454  
  1455  		// Double-check that bits to be written were written correctly.
  1456  		// Does not check that other bits were not written, unfortunately.
  1457  		h := heapBitsForAddr(x)
  1458  		nptr := typ.ptrdata / sys.PtrSize
  1459  		ndata := typ.size / sys.PtrSize
  1460  		count := dataSize / typ.size
  1461  		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1462  		for i := uintptr(0); i < size/sys.PtrSize; i++ {
  1463  			j := i % ndata
  1464  			var have, want uint8
  1465  			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
  1466  			if i >= totalptr {
  1467  				want = 0 // deadmarker
  1468  				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
  1469  					want = bitScan
  1470  				}
  1471  			} else {
  1472  				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
  1473  					want |= bitPointer
  1474  				}
  1475  				if i != 1 {
  1476  					want |= bitScan
  1477  				} else {
  1478  					have &^= bitScan
  1479  				}
  1480  			}
  1481  			if have != want {
  1482  				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
  1483  				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1484  				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
  1485  				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1486  				h0 := heapBitsForAddr(x)
  1487  				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1488  				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
  1489  				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
  1490  				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
  1491  				if typ.kind&kindGCProg != 0 {
  1492  					println("GC program:")
  1493  					dumpGCProg(addb(typ.gcdata, 4))
  1494  				}
  1495  				throw("bad heapBitsSetType")
  1496  			}
  1497  			h = h.next()
  1498  		}
  1499  		if ptrmask == debugPtrmask.data {
  1500  			unlock(&debugPtrmask.lock)
  1501  		}
  1502  	}
  1503  }
  1504  
  1505  var debugPtrmask struct {
  1506  	lock mutex
  1507  	data *byte
  1508  }
  1509  
  1510  // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
  1511  // progSize is the size of the memory described by the program.
  1512  // elemSize is the size of the element that the GC program describes (a prefix of).
  1513  // dataSize is the total size of the intended data, a multiple of elemSize.
  1514  // allocSize is the total size of the allocated memory.
  1515  //
  1516  // GC programs are only used for large allocations.
  1517  // heapBitsSetType requires that allocSize is a multiple of 4 words,
  1518  // so that the relevant bitmap bytes are not shared with surrounding
  1519  // objects.
  1520  func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
  1521  	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
  1522  		// Alignment will be wrong.
  1523  		throw("heapBitsSetTypeGCProg: small allocation")
  1524  	}
  1525  	var totalBits uintptr
  1526  	if elemSize == dataSize {
  1527  		totalBits = runGCProg(prog, nil, h.bitp, 2)
  1528  		if totalBits*sys.PtrSize != progSize {
  1529  			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
  1530  			throw("heapBitsSetTypeGCProg: unexpected bit count")
  1531  		}
  1532  	} else {
  1533  		count := dataSize / elemSize
  1534  
  1535  		// Piece together program trailer to run after prog that does:
  1536  		//	literal(0)
  1537  		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
  1538  		//	repeat(elemSize, count-1) // repeat that element for count
  1539  		// This zero-pads the data remaining in the first element and then
  1540  		// repeats that first element to fill the array.
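		//
		// For example, assuming a 64-bit system with elemSize = 64 (8 words),
		// progSize = 24 (3 words), and dataSize = 320 (count = 5), the trailer
		// built below would be:
		//	01 00      literal(0): one zero bit
		//	81 04      repeat(1, 4): four more zero bits, padding the element to 8 words
		//	80 08 04   repeat(8, 4): repeat the 8-word element 4 more times
		//	00         stop
		// Together with the 3 bits emitted by prog, that is 8 + 4*8 = 40
		// one-bit entries, i.e. 40 words = 320 bytes.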
  1541  		var trailer [40]byte // 3 varints (max 10 each) + some bytes
  1542  		i := 0
  1543  		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
  1544  			// literal(0)
  1545  			trailer[i] = 0x01
  1546  			i++
  1547  			trailer[i] = 0
  1548  			i++
  1549  			if n > 1 {
  1550  				// repeat(1, n-1)
  1551  				trailer[i] = 0x81
  1552  				i++
  1553  				n--
  1554  				for ; n >= 0x80; n >>= 7 {
  1555  					trailer[i] = byte(n | 0x80)
  1556  					i++
  1557  				}
  1558  				trailer[i] = byte(n)
  1559  				i++
  1560  			}
  1561  		}
  1562  		// repeat(elemSize/ptrSize, count-1)
  1563  		trailer[i] = 0x80
  1564  		i++
  1565  		n := elemSize / sys.PtrSize
  1566  		for ; n >= 0x80; n >>= 7 {
  1567  			trailer[i] = byte(n | 0x80)
  1568  			i++
  1569  		}
  1570  		trailer[i] = byte(n)
  1571  		i++
  1572  		n = count - 1
  1573  		for ; n >= 0x80; n >>= 7 {
  1574  			trailer[i] = byte(n | 0x80)
  1575  			i++
  1576  		}
  1577  		trailer[i] = byte(n)
  1578  		i++
  1579  		trailer[i] = 0
  1580  		i++
  1581  
  1582  		runGCProg(prog, &trailer[0], h.bitp, 2)
  1583  
  1584  		// Even though we filled in the full array just now,
  1585  		// record that we only filled in up to the ptrdata of the
  1586  		// last element. This will cause the code below to
  1587  		// memclr the dead section of the final array element,
  1588  		// so that scanobject can stop early in the final element.
  1589  		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
  1590  	}
  1591  	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
  1592  	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
  1593  	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
  1594  }
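
// The repeat counts written into the trailer above use the same
// 7-bits-per-byte, high-bit-continuation encoding as encoding/binary's
// unsigned varints, so the bytes for a given count can be previewed with a
// small stand-alone sketch (illustrative only, not part of the runtime):
//
//	package main
//
//	import (
//		"encoding/binary"
//		"fmt"
//	)
//
//	func main() {
//		buf := make([]byte, binary.MaxVarintLen64)
//		n := binary.PutUvarint(buf, 300) // e.g. a repeat count of 300
//		fmt.Printf("% x\n", buf[:n])     // prints: ac 02
//	}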
  1595  
  1596  // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
  1597  // size is the size of the region described by prog, in bytes.
  1598  // The resulting bitvector will have no more than size/sys.PtrSize bits.
  1599  func progToPointerMask(prog *byte, size uintptr) bitvector {
  1600  	n := (size/sys.PtrSize + 7) / 8
  1601  	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
  1602  	x[len(x)-1] = 0xa1 // overflow check sentinel
  1603  	n = runGCProg(prog, nil, &x[0], 1)
  1604  	if x[len(x)-1] != 0xa1 {
  1605  		throw("progToPointerMask: overflow")
  1606  	}
  1607  	return bitvector{int32(n), &x[0]}
  1608  }
  1609  
  1610  // Packed GC pointer bitmaps, aka GC programs.
  1611  //
  1612  // For large types containing arrays, the type information has a
  1613  // natural repetition that can be encoded to save space in the
  1614  // binary and in the memory representation of the type information.
  1615  //
  1616  // The encoding is a simple Lempel-Ziv style bytecode machine
  1617  // with the following instructions:
  1618  //
  1619  //	00000000: stop
  1620  //	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
  1621  //	10000000 n c: repeat the previous n bits c times; n, c are varints
  1622  //	1nnnnnnn c: repeat the previous n bits c times; c is a varint
  1623  
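//
// For example (an illustrative encoding; the compiler may emit a different
// one), a 100-element array whose 3-word element is laid out as
// pointer, scalar, pointer could be described by:
//	03 05	emit 3 bits, low bit first: 1, 0, 1
//	83 63	repeat the previous 3 bits 99 times (0x63 = 99)
//	00	stop
// which expands to 3 + 3*99 = 300 one-bit entries, one per word.
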
  1624  // runGCProg executes the GC program prog, and then trailer if non-nil,
  1625  // writing to dst with entries of the given size.
  1626  // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
  1627  // If size == 2, dst is the 2-bit heap bitmap, and writes move forward
  1628  // from dst, with four 2-bit entries packed into each byte. In this case,
  1629  // the caller guarantees that only whole bytes in dst need to be written.
  1630  //
  1631  // runGCProg returns the number of 1- or 2-bit entries written to memory.
  1632  func runGCProg(prog, trailer, dst *byte, size int) uintptr {
  1633  	dstStart := dst
  1634  
  1635  	// Bits waiting to be written to memory.
  1636  	var bits uintptr
  1637  	var nbits uintptr
  1638  
  1639  	p := prog
  1640  Run:
  1641  	for {
  1642  		// Flush accumulated full bytes.
  1643  		// The rest of the loop assumes that nbits <= 7.
  1644  		for ; nbits >= 8; nbits -= 8 {
  1645  			if size == 1 {
  1646  				*dst = uint8(bits)
  1647  				dst = add1(dst)
  1648  				bits >>= 8
  1649  			} else {
  1650  				v := bits&bitPointerAll | bitScanAll
  1651  				*dst = uint8(v)
  1652  				dst = add1(dst)
  1653  				bits >>= 4
  1654  				v = bits&bitPointerAll | bitScanAll
  1655  				*dst = uint8(v)
  1656  				dst = add1(dst)
  1657  				bits >>= 4
  1658  			}
  1659  		}
  1660  
  1661  		// Process one instruction.
  1662  		inst := uintptr(*p)
  1663  		p = add1(p)
  1664  		n := inst & 0x7F
  1665  		if inst&0x80 == 0 {
  1666  			// Literal bits; n == 0 means end of program.
  1667  			if n == 0 {
  1668  				// Program is over; continue in trailer if present.
  1669  				if trailer != nil {
  1670  					p = trailer
  1671  					trailer = nil
  1672  					continue
  1673  				}
  1674  				break Run
  1675  			}
  1676  			nbyte := n / 8
  1677  			for i := uintptr(0); i < nbyte; i++ {
  1678  				bits |= uintptr(*p) << nbits
  1679  				p = add1(p)
  1680  				if size == 1 {
  1681  					*dst = uint8(bits)
  1682  					dst = add1(dst)
  1683  					bits >>= 8
  1684  				} else {
  1685  					v := bits&0xf | bitScanAll
  1686  					*dst = uint8(v)
  1687  					dst = add1(dst)
  1688  					bits >>= 4
  1689  					v = bits&0xf | bitScanAll
  1690  					*dst = uint8(v)
  1691  					dst = add1(dst)
  1692  					bits >>= 4
  1693  				}
  1694  			}
  1695  			if n %= 8; n > 0 {
  1696  				bits |= uintptr(*p) << nbits
  1697  				p = add1(p)
  1698  				nbits += n
  1699  			}
  1700  			continue Run
  1701  		}
  1702  
  1703  		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
  1704  		if n == 0 {
  1705  			for off := uint(0); ; off += 7 {
  1706  				x := uintptr(*p)
  1707  				p = add1(p)
  1708  				n |= (x & 0x7F) << off
  1709  				if x&0x80 == 0 {
  1710  					break
  1711  				}
  1712  			}
  1713  		}
  1714  
  1715  		// Count is encoded in a varint in the next bytes.
  1716  		c := uintptr(0)
  1717  		for off := uint(0); ; off += 7 {
  1718  			x := uintptr(*p)
  1719  			p = add1(p)
  1720  			c |= (x & 0x7F) << off
  1721  			if x&0x80 == 0 {
  1722  				break
  1723  			}
  1724  		}
  1725  		c *= n // now total number of bits to copy
  1726  
  1727  		// If the number of bits being repeated is small, load them
  1728  		// into a register and use that register for the entire loop
  1729  		// instead of repeatedly reading from memory.
  1730  		// Handling fewer than 8 bits here makes the general loop simpler.
  1731  		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
  1732  		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
  1733  		// it will not overflow.
  1734  		src := dst
  1735  		const maxBits = sys.PtrSize*8 - 7
  1736  		if n <= maxBits {
  1737  			// Start with bits in output buffer.
  1738  			pattern := bits
  1739  			npattern := nbits
  1740  
  1741  			// If we need more bits, fetch them from memory.
  1742  			if size == 1 {
  1743  				src = subtract1(src)
  1744  				for npattern < n {
  1745  					pattern <<= 8
  1746  					pattern |= uintptr(*src)
  1747  					src = subtract1(src)
  1748  					npattern += 8
  1749  				}
  1750  			} else {
  1751  				src = subtract1(src)
  1752  				for npattern < n {
  1753  					pattern <<= 4
  1754  					pattern |= uintptr(*src) & 0xf
  1755  					src = subtract1(src)
  1756  					npattern += 4
  1757  				}
  1758  			}
  1759  
  1760  			// We started with the whole bit output buffer,
  1761  			// and then we loaded bits from whole bytes.
  1762  			// Either way, we might now have too many instead of too few.
  1763  			// Discard the extra.
  1764  			if npattern > n {
  1765  				pattern >>= npattern - n
  1766  				npattern = n
  1767  			}
  1768  
  1769  			// Replicate pattern to at most maxBits.
  1770  			if npattern == 1 {
  1771  				// One bit being repeated.
  1772  				// If the bit is 1, make the pattern all 1s.
  1773  				// If the bit is 0, the pattern is already all 0s,
  1774  				// but we can claim that the number of bits
  1775  				// in the word is equal to the number we need (c),
  1776  				// because right shift of bits will zero fill.
  1777  				if pattern == 1 {
  1778  					pattern = 1<<maxBits - 1
  1779  					npattern = maxBits
  1780  				} else {
  1781  					npattern = c
  1782  				}
  1783  			} else {
  1784  				b := pattern
  1785  				nb := npattern
  1786  				if nb+nb <= maxBits {
  1787  					// Double pattern until the whole uintptr is filled.
  1788  					for nb <= sys.PtrSize*8 {
  1789  						b |= b << nb
  1790  						nb += nb
  1791  					}
  1792  					// Trim away incomplete copy of original pattern in high bits.
  1793  					// TODO(rsc): Replace with table lookup or loop on systems without divide?
  1794  					nb = maxBits / npattern * npattern
  1795  					b &= 1<<nb - 1
  1796  					pattern = b
  1797  					npattern = nb
  1798  				}
  1799  			}
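			// For example, on a 64-bit system (maxBits = 57) with npattern = 3
			// and pattern = 0b101, the doubling above fills the low word with
			// copies of the 3-bit group; nb is then trimmed to 57/3*3 = 57, so
			// pattern holds 19 whole copies and each pass through the flush
			// loop below consumes 57 bits of the repeat count at once.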
  1800  
  1801  			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
  1802  			// Since pattern contains >8 bits, there will be full bytes to flush
  1803  			// on each iteration.
  1804  			for ; c >= npattern; c -= npattern {
  1805  				bits |= pattern << nbits
  1806  				nbits += npattern
  1807  				if size == 1 {
  1808  					for nbits >= 8 {
  1809  						*dst = uint8(bits)
  1810  						dst = add1(dst)
  1811  						bits >>= 8
  1812  						nbits -= 8
  1813  					}
  1814  				} else {
  1815  					for nbits >= 4 {
  1816  						*dst = uint8(bits&0xf | bitScanAll)
  1817  						dst = add1(dst)
  1818  						bits >>= 4
  1819  						nbits -= 4
  1820  					}
  1821  				}
  1822  			}
  1823  
  1824  			// Add final fragment to bit buffer.
  1825  			if c > 0 {
  1826  				pattern &= 1<<c - 1
  1827  				bits |= pattern << nbits
  1828  				nbits += c
  1829  			}
  1830  			continue Run
  1831  		}
  1832  
  1833  		// Repeat; n too large to fit in a register.
  1834  		// Since nbits <= 7, we know the first few bytes of repeated data
  1835  		// are already written to memory.
  1836  		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
  1837  		if size == 1 {
  1838  			// Leading src fragment.
  1839  			src = subtractb(src, (off+7)/8)
  1840  			if frag := off & 7; frag != 0 {
  1841  				bits |= uintptr(*src) >> (8 - frag) << nbits
  1842  				src = add1(src)
  1843  				nbits += frag
  1844  				c -= frag
  1845  			}
  1846  			// Main loop: load one byte, write another.
  1847  			// The bits are rotating through the bit buffer.
  1848  			for i := c / 8; i > 0; i-- {
  1849  				bits |= uintptr(*src) << nbits
  1850  				src = add1(src)
  1851  				*dst = uint8(bits)
  1852  				dst = add1(dst)
  1853  				bits >>= 8
  1854  			}
  1855  			// Final src fragment.
  1856  			if c %= 8; c > 0 {
  1857  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1858  				nbits += c
  1859  			}
  1860  		} else {
  1861  			// Leading src fragment.
  1862  			src = subtractb(src, (off+3)/4)
  1863  			if frag := off & 3; frag != 0 {
  1864  				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
  1865  				src = add1(src)
  1866  				nbits += frag
  1867  				c -= frag
  1868  			}
  1869  			// Main loop: load one byte, write another.
  1870  			// The bits are rotating through the bit buffer.
  1871  			for i := c / 4; i > 0; i-- {
  1872  				bits |= (uintptr(*src) & 0xf) << nbits
  1873  				src = add1(src)
  1874  				*dst = uint8(bits&0xf | bitScanAll)
  1875  				dst = add1(dst)
  1876  				bits >>= 4
  1877  			}
  1878  			// Final src fragment.
  1879  			if c %= 4; c > 0 {
  1880  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1881  				nbits += c
  1882  			}
  1883  		}
  1884  	}
  1885  
  1886  	// Write any final bits out, using full-byte writes, even for the final byte.
  1887  	var totalBits uintptr
  1888  	if size == 1 {
  1889  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
  1890  		nbits += -nbits & 7
  1891  		for ; nbits > 0; nbits -= 8 {
  1892  			*dst = uint8(bits)
  1893  			dst = add1(dst)
  1894  			bits >>= 8
  1895  		}
  1896  	} else {
  1897  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
  1898  		nbits += -nbits & 3
  1899  		for ; nbits > 0; nbits -= 4 {
  1900  			v := bits&0xf | bitScanAll
  1901  			*dst = uint8(v)
  1902  			dst = add1(dst)
  1903  			bits >>= 4
  1904  		}
  1905  	}
  1906  	return totalBits
  1907  }
  1908  
  1909  // materializeGCProg allocates space for the (1-bit) pointer bitmask
  1910  // for an object of size ptrdata.  Then it fills that space with the
  1911  // pointer bitmask specified by the program prog.
  1912  // The bitmask starts at s.startAddr.
  1913  // The result must be deallocated with dematerializeGCProg.
  1914  func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
  1915  	s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys)
  1916  	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
  1917  	return s
  1918  }
  1919  func dematerializeGCProg(s *mspan) {
  1920  	mheap_.freeManual(s, &memstats.gc_sys)
  1921  }
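
// For a rough sense of the size computed in materializeGCProg above: the
// mask needs one bit per pointer-sized word, i.e. ptrdata/(8*sys.PtrSize)
// bytes, rounded up to whole pages for allocManual. Assuming the usual
// 8 KiB runtime page size on a 64-bit system, 1 MiB of pointer data needs
// 16 KiB of mask, which is 2 pages.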
  1922  
  1923  func dumpGCProg(p *byte) {
  1924  	nptr := 0
  1925  	for {
  1926  		x := *p
  1927  		p = add1(p)
  1928  		if x == 0 {
  1929  			print("\t", nptr, " end\n")
  1930  			break
  1931  		}
  1932  		if x&0x80 == 0 {
  1933  			print("\t", nptr, " lit ", x, ":")
  1934  			n := int(x+7) / 8
  1935  			for i := 0; i < n; i++ {
  1936  				print(" ", hex(*p))
  1937  				p = add1(p)
  1938  			}
  1939  			print("\n")
  1940  			nptr += int(x)
  1941  		} else {
  1942  			nbit := int(x &^ 0x80)
  1943  			if nbit == 0 {
  1944  				for nb := uint(0); ; nb += 7 {
  1945  					x := *p
  1946  					p = add1(p)
  1947  					nbit |= int(x&0x7f) << nb
  1948  					if x&0x80 == 0 {
  1949  						break
  1950  					}
  1951  				}
  1952  			}
  1953  			count := 0
  1954  			for nb := uint(0); ; nb += 7 {
  1955  				x := *p
  1956  				p = add1(p)
  1957  				count |= int(x&0x7f) << nb
  1958  				if x&0x80 == 0 {
  1959  					break
  1960  				}
  1961  			}
  1962  			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
  1963  			nptr += nbit * count
  1964  		}
  1965  	}
  1966  }
  1967  
  1968  // Testing.
  1969  
  1970  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  1971  	target := (*stkframe)(ctxt)
  1972  	if frame.sp <= target.sp && target.sp < frame.varp {
  1973  		*target = *frame
  1974  		return false
  1975  	}
  1976  	return true
  1977  }
  1978  
  1979  // gcbits returns the GC type info for x, for testing.
  1980  // The result is the bitmap entries (0 or 1), one entry per byte.
  1981  //go:linkname reflect_gcbits reflect.gcbits
  1982  func reflect_gcbits(x interface{}) []byte {
  1983  	ret := getgcmask(x)
  1984  	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
  1985  	nptr := typ.ptrdata / sys.PtrSize
  1986  	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
  1987  		ret = ret[:len(ret)-1]
  1988  	}
  1989  	return ret
  1990  }
  1991  
  1992  // Returns GC type info for the pointer stored in ep for testing.
  1993  // If ep points to the stack, only static live information will be returned
  1994  // (i.e. not for objects which are only dynamically live stack objects).
  1995  func getgcmask(ep interface{}) (mask []byte) {
  1996  	e := *efaceOf(&ep)
  1997  	p := e.data
  1998  	t := e._type
  1999  	// data or bss
  2000  	for _, datap := range activeModules() {
  2001  		// data
  2002  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  2003  			bitmap := datap.gcdatamask.bytedata
  2004  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2005  			mask = make([]byte, n/sys.PtrSize)
  2006  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2007  				off := (uintptr(p) + i - datap.data) / sys.PtrSize
  2008  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  2009  			}
  2010  			return
  2011  		}
  2012  
  2013  		// bss
  2014  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  2015  			bitmap := datap.gcbssmask.bytedata
  2016  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2017  			mask = make([]byte, n/sys.PtrSize)
  2018  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2019  				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
  2020  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  2021  			}
  2022  			return
  2023  		}
  2024  	}
  2025  
  2026  	// heap
  2027  	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
  2028  		hbits := heapBitsForAddr(base)
  2029  		n := s.elemsize
  2030  		mask = make([]byte, n/sys.PtrSize)
  2031  		for i := uintptr(0); i < n; i += sys.PtrSize {
  2032  			if hbits.isPointer() {
  2033  				mask[i/sys.PtrSize] = 1
  2034  			}
  2035  			if i != 1*sys.PtrSize && !hbits.morePointers() {
  2036  				mask = mask[:i/sys.PtrSize]
  2037  				break
  2038  			}
  2039  			hbits = hbits.next()
  2040  		}
  2041  		return
  2042  	}
  2043  
  2044  	// stack
  2045  	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
  2046  		var frame stkframe
  2047  		frame.sp = uintptr(p)
  2048  		_g_ := getg()
  2049  		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  2050  		if frame.fn.valid() {
  2051  			locals, _, _ := getStackMap(&frame, nil, false)
  2052  			if locals.n == 0 {
  2053  				return
  2054  			}
  2055  			size := uintptr(locals.n) * sys.PtrSize
  2056  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2057  			mask = make([]byte, n/sys.PtrSize)
  2058  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2059  				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
  2060  				mask[i/sys.PtrSize] = locals.ptrbit(off)
  2061  			}
  2062  		}
  2063  		return
  2064  	}
  2065  
  2066  	// otherwise, not something the GC knows about.
  2067  	// possibly read-only data, like malloc(0).
  2068  	// must not have pointers
  2069  	return
  2070  }
  2071  
