
Source file src/runtime/mbitmap.go

Documentation: runtime

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: type and heap bitmaps.
     6  //
     7  // Stack, data, and bss bitmaps
     8  //
     9  // Stack frames and global variables in the data and bss sections are described
    10  // by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
    11  // to be visited during GC. The bits in each byte are consumed starting with
    12  // the low bit: 1<<0, 1<<1, and so on.
    13  //
    14  // Heap bitmap
    15  //
    16  // The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
    17  // stored in the heapArena metadata backing each heap arena.
     18  // That is, if ha is the heapArena for the arena starting at start,
    19  // then ha.bitmap[0] holds the 2-bit entries for the four words start
    20  // through start+3*ptrSize, ha.bitmap[1] holds the entries for
    21  // start+4*ptrSize through start+7*ptrSize, and so on.
    22  //
    23  // In each 2-bit entry, the lower bit holds the same information as in the 1-bit
    24  // bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
    25  // The meaning of the high bit depends on the position of the word being described
    26  // in its allocated object. In all words *except* the second word, the
    27  // high bit indicates that the object is still being described. In
    28  // these words, if a bit pair with a high bit 0 is encountered, the
    29  // low bit can also be assumed to be 0, and the object description is
    30  // over. This 00 is called the ``dead'' encoding: it signals that the
    31  // rest of the words in the object are uninteresting to the garbage
    32  // collector.
    33  //
    34  // In the second word, the high bit is the GC ``checkmarked'' bit (see below).
    35  //
    36  // The 2-bit entries are split when written into the byte, so that the top half
    37  // of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
    38  // bits.
    39  // This form allows a copy from the 1-bit to the 4-bit form to keep the
    40  // pointer bits contiguous, instead of having to space them out.
    41  //
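// As an illustrative sketch (restating heapBitsForAddr and heapBits.bits,
// defined below), the entry for the heap word at address addr in arena ha is
// located as:
//
//	byte  := ha.bitmap[(addr/(ptrSize*4)) % heapArenaBitmapBytes]
//	shift := (addr / ptrSize) & 3               // which of the byte's 4 entries
//	isPointer    := byte>>shift&bitPointer != 0 // low nibble, bit 1<<0
//	morePointers := byte>>shift&bitScan != 0    // high nibble, bit 1<<4
//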
    42  // The code makes use of the fact that the zero value for a heap bitmap
     43  // has no live pointer bit set and is (depending on position) not used,
     44  // not checkmarked, and is the dead encoding.
    45  // These properties must be preserved when modifying the encoding.
    46  //
    47  // The bitmap for noscan spans is not maintained. Code must ensure
    48  // that an object is scannable before consulting its bitmap by
    49  // checking either the noscan bit in the span or by consulting its
    50  // type's information.
    51  //
    52  // Checkmarks
    53  //
    54  // In a concurrent garbage collector, one worries about failing to mark
    55  // a live object due to mutations without write barriers or bugs in the
    56  // collector implementation. As a sanity check, the GC has a 'checkmark'
    57  // mode that retraverses the object graph with the world stopped, to make
    58  // sure that everything that should be marked is marked.
    59  // In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
    60  // for the second word of the object holds the checkmark bit.
    61  // When not in checkmark mode, this bit is set to 1.
    62  //
    63  // The smallest possible allocation is 8 bytes. On a 32-bit machine, that
    64  // means every allocated object has two words, so there is room for the
    65  // checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
    66  // just one word, so the second bit pair is not available for encoding the
    67  // checkmark. However, because non-pointer allocations are combined
    68  // into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
    69  // must be a pointer, so the type bit in the first word is not actually needed.
     70  // It is still used in general, except that in checkmark mode the type bit is
     71  // repurposed as the checkmark bit and then reinitialized (to 1) as the type
     72  // bit when checkmarking is finished.
    73  //
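// As an illustrative note (see isCheckmarked and setCheckmarked below): for an
// object of two or more words the checkmark is the high (scan) bit of the
// second word's entry, (*h.bitp>>(h.shift+1))&bitScan != 0, while for a
// one-word object on a 64-bit system it is the first word's pointer (type)
// bit, (*h.bitp>>h.shift)&bitPointer != 0.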
    74  
    75  package runtime
    76  
    77  import (
    78  	"runtime/internal/atomic"
    79  	"runtime/internal/sys"
    80  	"unsafe"
    81  )
    82  
    83  const (
    84  	bitPointer = 1 << 0
    85  	bitScan    = 1 << 4
    86  
    87  	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
    88  	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte
    89  
    90  	// all scan/pointer bits in a byte
    91  	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
    92  	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
    93  )
    94  
    95  // addb returns the byte pointer p+n.
    96  //go:nowritebarrier
    97  //go:nosplit
    98  func addb(p *byte, n uintptr) *byte {
    99  	// Note: wrote out full expression instead of calling add(p, n)
   100  	// to reduce the number of temporaries generated by the
   101  	// compiler for this trivial expression during inlining.
   102  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
   103  }
   104  
   105  // subtractb returns the byte pointer p-n.
   106  //go:nowritebarrier
   107  //go:nosplit
   108  func subtractb(p *byte, n uintptr) *byte {
   109  	// Note: wrote out full expression instead of calling add(p, -n)
   110  	// to reduce the number of temporaries generated by the
   111  	// compiler for this trivial expression during inlining.
   112  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
   113  }
   114  
   115  // add1 returns the byte pointer p+1.
   116  //go:nowritebarrier
   117  //go:nosplit
   118  func add1(p *byte) *byte {
   119  	// Note: wrote out full expression instead of calling addb(p, 1)
   120  	// to reduce the number of temporaries generated by the
   121  	// compiler for this trivial expression during inlining.
   122  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
   123  }
   124  
   125  // subtract1 returns the byte pointer p-1.
   126  //go:nowritebarrier
   127  //
   128  // nosplit because it is used during write barriers and must not be preempted.
   129  //go:nosplit
   130  func subtract1(p *byte) *byte {
   131  	// Note: wrote out full expression instead of calling subtractb(p, 1)
   132  	// to reduce the number of temporaries generated by the
   133  	// compiler for this trivial expression during inlining.
   134  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
   135  }
   136  
   137  // heapBits provides access to the bitmap bits for a single heap word.
   138  // The methods on heapBits take value receivers so that the compiler
   139  // can more easily inline calls to those methods and registerize the
   140  // struct fields independently.
   141  type heapBits struct {
   142  	bitp  *uint8
   143  	shift uint32
   144  	arena uint32 // Index of heap arena containing bitp
    145  	last  *uint8 // Last byte of the arena's bitmap
   146  }
   147  
   148  // Make the compiler check that heapBits.arena is large enough to hold
   149  // the maximum arena frame number.
   150  var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
   151  
   152  // markBits provides access to the mark bit for an object in the heap.
   153  // bytep points to the byte holding the mark bit.
   154  // mask is a byte with a single bit set that can be &ed with *bytep
   155  // to see if the bit has been set.
    156  // *m.bytep&m.mask != 0 indicates the mark bit is set.
   157  // index can be used along with span information to generate
   158  // the address of the object in the heap.
   159  // We maintain one set of mark bits for allocation and one for
   160  // marking purposes.
   161  type markBits struct {
   162  	bytep *uint8
   163  	mask  uint8
   164  	index uintptr
   165  }
   166  
   167  //go:nosplit
   168  func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
   169  	bytep, mask := s.allocBits.bitp(allocBitIndex)
   170  	return markBits{bytep, mask, allocBitIndex}
   171  }
   172  
    173  // refillAllocCache takes 8 bytes from s.allocBits starting at whichByte
    174  // and negates them so that ctz (count trailing zeros) instructions
    175  // can be used. It then places these 8 bytes into the cached 64-bit
    176  // s.allocCache.
   177  func (s *mspan) refillAllocCache(whichByte uintptr) {
   178  	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
   179  	aCache := uint64(0)
   180  	aCache |= uint64(bytes[0])
   181  	aCache |= uint64(bytes[1]) << (1 * 8)
   182  	aCache |= uint64(bytes[2]) << (2 * 8)
   183  	aCache |= uint64(bytes[3]) << (3 * 8)
   184  	aCache |= uint64(bytes[4]) << (4 * 8)
   185  	aCache |= uint64(bytes[5]) << (5 * 8)
   186  	aCache |= uint64(bytes[6]) << (6 * 8)
   187  	aCache |= uint64(bytes[7]) << (7 * 8)
   188  	s.allocCache = ^aCache
   189  }
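
// Illustrative example (not part of the original code): if s.allocBits[0] is
// 0b00000111 (objects 0, 1, and 2 allocated) and the next seven bytes are zero,
// then refillAllocCache(0) stores ^uint64(0x7) in s.allocCache, so
// sys.Ctz64(s.allocCache) is 3 and nextFreeIndex (below) reports object 3 as
// the next free slot.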
   190  
   191  // nextFreeIndex returns the index of the next free object in s at
   192  // or after s.freeindex.
   193  // There are hardware instructions that can be used to make this
   194  // faster if profiling warrants it.
   195  func (s *mspan) nextFreeIndex() uintptr {
   196  	sfreeindex := s.freeindex
   197  	snelems := s.nelems
   198  	if sfreeindex == snelems {
   199  		return sfreeindex
   200  	}
   201  	if sfreeindex > snelems {
   202  		throw("s.freeindex > s.nelems")
   203  	}
   204  
   205  	aCache := s.allocCache
   206  
   207  	bitIndex := sys.Ctz64(aCache)
   208  	for bitIndex == 64 {
   209  		// Move index to start of next cached bits.
   210  		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
   211  		if sfreeindex >= snelems {
   212  			s.freeindex = snelems
   213  			return snelems
   214  		}
   215  		whichByte := sfreeindex / 8
   216  		// Refill s.allocCache with the next 64 alloc bits.
   217  		s.refillAllocCache(whichByte)
   218  		aCache = s.allocCache
   219  		bitIndex = sys.Ctz64(aCache)
   220  		// nothing available in cached bits
   221  		// grab the next 8 bytes and try again.
   222  	}
   223  	result := sfreeindex + uintptr(bitIndex)
   224  	if result >= snelems {
   225  		s.freeindex = snelems
   226  		return snelems
   227  	}
   228  
   229  	s.allocCache >>= uint(bitIndex + 1)
   230  	sfreeindex = result + 1
   231  
   232  	if sfreeindex%64 == 0 && sfreeindex != snelems {
   233  		// We just incremented s.freeindex so it isn't 0.
   234  		// As each 1 in s.allocCache was encountered and used for allocation
   235  		// it was shifted away. At this point s.allocCache contains all 0s.
   236  		// Refill s.allocCache so that it corresponds
   237  		// to the bits at s.allocBits starting at s.freeindex.
   238  		whichByte := sfreeindex / 8
   239  		s.refillAllocCache(whichByte)
   240  	}
   241  	s.freeindex = sfreeindex
   242  	return result
   243  }
   244  
   245  // isFree reports whether the index'th object in s is unallocated.
   246  //
   247  // The caller must ensure s.state is mSpanInUse, and there must have
   248  // been no preemption points since ensuring this (which could allow a
   249  // GC transition, which would allow the state to change).
   250  func (s *mspan) isFree(index uintptr) bool {
   251  	if index < s.freeindex {
   252  		return false
   253  	}
   254  	bytep, mask := s.allocBits.bitp(index)
   255  	return *bytep&mask == 0
   256  }
   257  
   258  func (s *mspan) objIndex(p uintptr) uintptr {
   259  	byteOffset := p - s.base()
   260  	if byteOffset == 0 {
   261  		return 0
   262  	}
   263  	if s.baseMask != 0 {
   264  		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
   265  		return byteOffset >> s.divShift
   266  	}
   267  	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
   268  }
   269  
   270  func markBitsForAddr(p uintptr) markBits {
   271  	s := spanOf(p)
   272  	objIndex := s.objIndex(p)
   273  	return s.markBitsForIndex(objIndex)
   274  }
   275  
   276  func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
   277  	bytep, mask := s.gcmarkBits.bitp(objIndex)
   278  	return markBits{bytep, mask, objIndex}
   279  }
   280  
   281  func (s *mspan) markBitsForBase() markBits {
   282  	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
   283  }
   284  
   285  // isMarked reports whether mark bit m is set.
   286  func (m markBits) isMarked() bool {
   287  	return *m.bytep&m.mask != 0
   288  }
   289  
   290  // setMarked sets the marked bit in the markbits, atomically.
   291  func (m markBits) setMarked() {
   292  	// Might be racing with other updates, so use atomic update always.
   293  	// We used to be clever here and use a non-atomic update in certain
   294  	// cases, but it's not worth the risk.
   295  	atomic.Or8(m.bytep, m.mask)
   296  }
   297  
   298  // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
   299  func (m markBits) setMarkedNonAtomic() {
   300  	*m.bytep |= m.mask
   301  }
   302  
   303  // clearMarked clears the marked bit in the markbits, atomically.
   304  func (m markBits) clearMarked() {
   305  	// Might be racing with other updates, so use atomic update always.
   306  	// We used to be clever here and use a non-atomic update in certain
   307  	// cases, but it's not worth the risk.
   308  	atomic.And8(m.bytep, ^m.mask)
   309  }
   310  
   311  // markBitsForSpan returns the markBits for the span base address base.
   312  func markBitsForSpan(base uintptr) (mbits markBits) {
   313  	mbits = markBitsForAddr(base)
   314  	if mbits.mask != 1 {
   315  		throw("markBitsForSpan: unaligned start")
   316  	}
   317  	return mbits
   318  }
   319  
   320  // advance advances the markBits to the next object in the span.
   321  func (m *markBits) advance() {
   322  	if m.mask == 1<<7 {
   323  		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
   324  		m.mask = 1
   325  	} else {
   326  		m.mask = m.mask << 1
   327  	}
   328  	m.index++
   329  }
   330  
   331  // heapBitsForAddr returns the heapBits for the address addr.
   332  // The caller must ensure addr is in an allocated span.
   333  // In particular, be careful not to point past the end of an object.
   334  //
   335  // nosplit because it is used during write barriers and must not be preempted.
   336  //go:nosplit
   337  func heapBitsForAddr(addr uintptr) (h heapBits) {
   338  	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
   339  	arena := arenaIndex(addr)
   340  	ha := mheap_.arenas[arena.l1()][arena.l2()]
   341  	// The compiler uses a load for nil checking ha, but in this
   342  	// case we'll almost never hit that cache line again, so it
   343  	// makes more sense to do a value check.
   344  	if ha == nil {
   345  		// addr is not in the heap. Return nil heapBits, which
   346  		// we expect to crash in the caller.
   347  		return
   348  	}
   349  	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
   350  	h.shift = uint32((addr / sys.PtrSize) & 3)
   351  	h.arena = uint32(arena)
   352  	h.last = &ha.bitmap[len(ha.bitmap)-1]
   353  	return
   354  }
   355  
    356  // badPointer prints diagnostics for a bad pointer found in the heap and throws.
   357  func badPointer(s *mspan, p, refBase, refOff uintptr) {
   358  	// Typically this indicates an incorrect use
   359  	// of unsafe or cgo to store a bad pointer in
   360  	// the Go heap. It may also indicate a runtime
   361  	// bug.
   362  	//
   363  	// TODO(austin): We could be more aggressive
   364  	// and detect pointers to unallocated objects
   365  	// in allocated spans.
   366  	printlock()
   367  	print("runtime: pointer ", hex(p))
   368  	state := s.state.get()
   369  	if state != mSpanInUse {
   370  		print(" to unallocated span")
   371  	} else {
   372  		print(" to unused region of span")
   373  	}
   374  	print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state, "\n")
   375  	if refBase != 0 {
   376  		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
   377  		gcDumpObject("object", refBase, refOff)
   378  	}
   379  	getg().m.traceback = 2
   380  	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
   381  }
   382  
   383  // findObject returns the base address for the heap object containing
   384  // the address p, the object's span, and the index of the object in s.
   385  // If p does not point into a heap object, it returns base == 0.
   386  //
    387  // If p is an invalid heap pointer and debug.invalidptr != 0,
   388  // findObject panics.
   389  //
   390  // refBase and refOff optionally give the base address of the object
   391  // in which the pointer p was found and the byte offset at which it
   392  // was found. These are used for error reporting.
   393  //
   394  // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
   395  // Since p is a uintptr, it would not be adjusted if the stack were to move.
   396  //go:nosplit
   397  func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
   398  	s = spanOf(p)
   399  	// If s is nil, the virtual address has never been part of the heap.
   400  	// This pointer may be to some mmap'd region, so we allow it.
   401  	if s == nil {
   402  		return
   403  	}
   404  	// If p is a bad pointer, it may not be in s's bounds.
   405  	//
   406  	// Check s.state to synchronize with span initialization
   407  	// before checking other fields. See also spanOfHeap.
   408  	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
   409  		// Pointers into stacks are also ok, the runtime manages these explicitly.
   410  		if state == mSpanManual {
   411  			return
   412  		}
   413  		// The following ensures that we are rigorous about what data
   414  		// structures hold valid pointers.
   415  		if debug.invalidptr != 0 {
   416  			badPointer(s, p, refBase, refOff)
   417  		}
   418  		return
   419  	}
    420  	// If this span holds objects of a power-of-2 size, just mask off the bits to
   421  	// the interior of the object. Otherwise use the size to get the base.
   422  	if s.baseMask != 0 {
   423  		// optimize for power of 2 sized objects.
   424  		base = s.base()
   425  		base = base + (p-base)&uintptr(s.baseMask)
   426  		objIndex = (base - s.base()) >> s.divShift
   427  		// base = p & s.baseMask is faster for small spans,
   428  		// but doesn't work for large spans.
   429  		// Overall, it's faster to use the more general computation above.
   430  	} else {
   431  		base = s.base()
   432  		if p-base >= s.elemsize {
   433  			// n := (p - base) / s.elemsize, using division by multiplication
   434  			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
   435  			base += objIndex * s.elemsize
   436  		}
   437  	}
   438  	return
   439  }
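
// Illustrative example: for a span of 16-byte objects (a power-of-2 size,
// so divShift is 4), a pointer 44 bytes past s.base() is rounded down to
// offset 32 and objIndex is 32>>4 == 2. For non-power-of-2 sizes the same
// quotient (p-base)/elemsize is computed with the precomputed divMul and
// divShift2 reciprocal instead of a hardware division.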
   440  
   441  // next returns the heapBits describing the next pointer-sized word in memory.
   442  // That is, if h describes address p, h.next() describes p+ptrSize.
   443  // Note that next does not modify h. The caller must record the result.
   444  //
   445  // nosplit because it is used during write barriers and must not be preempted.
   446  //go:nosplit
   447  func (h heapBits) next() heapBits {
   448  	if h.shift < 3*heapBitsShift {
   449  		h.shift += heapBitsShift
   450  	} else if h.bitp != h.last {
   451  		h.bitp, h.shift = add1(h.bitp), 0
   452  	} else {
   453  		// Move to the next arena.
   454  		return h.nextArena()
   455  	}
   456  	return h
   457  }
   458  
   459  // nextArena advances h to the beginning of the next heap arena.
   460  //
   461  // This is a slow-path helper to next. gc's inliner knows that
   462  // heapBits.next can be inlined even though it calls this. This is
   463  // marked noinline so it doesn't get inlined into next and cause next
   464  // to be too big to inline.
   465  //
   466  //go:nosplit
   467  //go:noinline
   468  func (h heapBits) nextArena() heapBits {
   469  	h.arena++
   470  	ai := arenaIdx(h.arena)
   471  	l2 := mheap_.arenas[ai.l1()]
   472  	if l2 == nil {
   473  		// We just passed the end of the object, which
   474  		// was also the end of the heap. Poison h. It
   475  		// should never be dereferenced at this point.
   476  		return heapBits{}
   477  	}
   478  	ha := l2[ai.l2()]
   479  	if ha == nil {
   480  		return heapBits{}
   481  	}
   482  	h.bitp, h.shift = &ha.bitmap[0], 0
   483  	h.last = &ha.bitmap[len(ha.bitmap)-1]
   484  	return h
   485  }
   486  
   487  // forward returns the heapBits describing n pointer-sized words ahead of h in memory.
   488  // That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
   489  // h.forward(1) is equivalent to h.next(), just slower.
   490  // Note that forward does not modify h. The caller must record the result.
   492  //go:nosplit
   493  func (h heapBits) forward(n uintptr) heapBits {
   494  	n += uintptr(h.shift) / heapBitsShift
   495  	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
   496  	h.shift = uint32(n%4) * heapBitsShift
   497  	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
   498  		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
   499  		return h
   500  	}
   501  
   502  	// We're in a new heap arena.
   503  	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
   504  	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
   505  	ai := arenaIdx(h.arena)
   506  	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
   507  		a := l2[ai.l2()]
   508  		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
   509  		h.last = &a.bitmap[len(a.bitmap)-1]
   510  	} else {
   511  		h.bitp, h.last = nil, nil
   512  	}
   513  	return h
   514  }
   515  
   516  // forwardOrBoundary is like forward, but stops at boundaries between
   517  // contiguous sections of the bitmap. It returns the number of words
   518  // advanced over, which will be <= n.
   519  func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
   520  	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
   521  	if n > maxn {
   522  		n = maxn
   523  	}
   524  	return h.forward(n), n
   525  }
   526  
    527  // bits returns the heap bits for the current word.
    528  // The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
    529  // The result includes in its higher bits the bits for subsequent words described by the same bitmap byte.
   530  //
   531  // nosplit because it is used during write barriers and must not be preempted.
   532  //go:nosplit
   533  func (h heapBits) bits() uint32 {
   534  	// The (shift & 31) eliminates a test and conditional branch
   535  	// from the generated code.
   536  	return uint32(*h.bitp) >> (h.shift & 31)
   537  }
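
// Illustrative example: with h.shift == 0 and *h.bitp == 0x31 (binary 00110001),
// bits() returns 0x31, so isPointer() is true (bit 1<<0, bitPointer) and
// morePointers() is true (bit 1<<4, bitScan); the remaining bits describe the
// following words covered by the same bitmap byte.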
   538  
    539  // morePointers reports whether this word and all remaining words in this object
    540  // might contain pointers (if false, this word and all remaining words are scalars).
    541  // h must not describe the second word of the object.
   542  func (h heapBits) morePointers() bool {
   543  	return h.bits()&bitScan != 0
   544  }
   545  
   546  // isPointer reports whether the heap bits describe a pointer word.
   547  //
   548  // nosplit because it is used during write barriers and must not be preempted.
   549  //go:nosplit
   550  func (h heapBits) isPointer() bool {
   551  	return h.bits()&bitPointer != 0
   552  }
   553  
   554  // isCheckmarked reports whether the heap bits have the checkmarked bit set.
   555  // It must be told how large the object at h is, because the encoding of the
   556  // checkmark bit varies by size.
   557  // h must describe the initial word of the object.
   558  func (h heapBits) isCheckmarked(size uintptr) bool {
   559  	if size == sys.PtrSize {
   560  		return (*h.bitp>>h.shift)&bitPointer != 0
   561  	}
   562  	// All multiword objects are 2-word aligned,
   563  	// so we know that the initial word's 2-bit pair
   564  	// and the second word's 2-bit pair are in the
   565  	// same heap bitmap byte, *h.bitp.
   566  	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
   567  }
   568  
   569  // setCheckmarked sets the checkmarked bit.
   570  // It must be told how large the object at h is, because the encoding of the
   571  // checkmark bit varies by size.
   572  // h must describe the initial word of the object.
   573  func (h heapBits) setCheckmarked(size uintptr) {
   574  	if size == sys.PtrSize {
   575  		atomic.Or8(h.bitp, bitPointer<<h.shift)
   576  		return
   577  	}
   578  	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
   579  }
   580  
   581  // bulkBarrierPreWrite executes a write barrier
   582  // for every pointer slot in the memory range [src, src+size),
   583  // using pointer/scalar information from [dst, dst+size).
   584  // This executes the write barriers necessary before a memmove.
   585  // src, dst, and size must be pointer-aligned.
   586  // The range [dst, dst+size) must lie within a single object.
   587  // It does not perform the actual writes.
   588  //
   589  // As a special case, src == 0 indicates that this is being used for a
   590  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   591  // barrier.
   592  //
   593  // Callers should call bulkBarrierPreWrite immediately before
   594  // calling memmove(dst, src, size). This function is marked nosplit
   595  // to avoid being preempted; the GC must not stop the goroutine
   596  // between the memmove and the execution of the barriers.
   597  // The caller is also responsible for cgo pointer checks if this
   598  // may be writing Go pointers into non-Go memory.
   599  //
   600  // The pointer bitmap is not maintained for allocations containing
   601  // no pointers at all; any caller of bulkBarrierPreWrite must first
   602  // make sure the underlying allocation contains pointers, usually
   603  // by checking typ.ptrdata.
   604  //
   605  // Callers must perform cgo checks if writeBarrier.cgo.
   606  //
   607  //go:nosplit
   608  func bulkBarrierPreWrite(dst, src, size uintptr) {
   609  	if (dst|src|size)&(sys.PtrSize-1) != 0 {
   610  		throw("bulkBarrierPreWrite: unaligned arguments")
   611  	}
   612  	if !writeBarrier.needed {
   613  		return
   614  	}
   615  	if s := spanOf(dst); s == nil {
   616  		// If dst is a global, use the data or BSS bitmaps to
   617  		// execute write barriers.
   618  		for _, datap := range activeModules() {
   619  			if datap.data <= dst && dst < datap.edata {
   620  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   621  				return
   622  			}
   623  		}
   624  		for _, datap := range activeModules() {
   625  			if datap.bss <= dst && dst < datap.ebss {
   626  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   627  				return
   628  			}
   629  		}
   630  		return
   631  	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
   632  		// dst was heap memory at some point, but isn't now.
   633  		// It can't be a global. It must be either our stack,
   634  		// or in the case of direct channel sends, it could be
   635  		// another stack. Either way, no need for barriers.
    636  		// This will also catch the case where dst is in a freed span,
    637  		// though that should never happen.
   638  		return
   639  	}
   640  
   641  	buf := &getg().m.p.ptr().wbBuf
   642  	h := heapBitsForAddr(dst)
   643  	if src == 0 {
   644  		for i := uintptr(0); i < size; i += sys.PtrSize {
   645  			if h.isPointer() {
   646  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   647  				if !buf.putFast(*dstx, 0) {
   648  					wbBufFlush(nil, 0)
   649  				}
   650  			}
   651  			h = h.next()
   652  		}
   653  	} else {
   654  		for i := uintptr(0); i < size; i += sys.PtrSize {
   655  			if h.isPointer() {
   656  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   657  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   658  				if !buf.putFast(*dstx, *srcx) {
   659  					wbBufFlush(nil, 0)
   660  				}
   661  			}
   662  			h = h.next()
   663  		}
   664  	}
   665  }
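
// Illustrative call pattern (a sketch of the usage described in the doc
// comment above, not a definitive caller):
//
//	bulkBarrierPreWrite(dst, src, size) // queue old and new pointer values
//	memmove(unsafe.Pointer(dst), unsafe.Pointer(src), size)
//
// with no preemption point between the two calls.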
   666  
   667  // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
   668  // does not execute write barriers for [dst, dst+size).
   669  //
   670  // In addition to the requirements of bulkBarrierPreWrite
   671  // callers need to ensure [dst, dst+size) is zeroed.
   672  //
   673  // This is used for special cases where e.g. dst was just
   674  // created and zeroed with malloc.
   675  //go:nosplit
   676  func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
   677  	if (dst|src|size)&(sys.PtrSize-1) != 0 {
   678  		throw("bulkBarrierPreWrite: unaligned arguments")
   679  	}
   680  	if !writeBarrier.needed {
   681  		return
   682  	}
   683  	buf := &getg().m.p.ptr().wbBuf
   684  	h := heapBitsForAddr(dst)
   685  	for i := uintptr(0); i < size; i += sys.PtrSize {
   686  		if h.isPointer() {
   687  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   688  			if !buf.putFast(0, *srcx) {
   689  				wbBufFlush(nil, 0)
   690  			}
   691  		}
   692  		h = h.next()
   693  	}
   694  }
   695  
   696  // bulkBarrierBitmap executes write barriers for copying from [src,
   697  // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
   698  // assumed to start maskOffset bytes into the data covered by the
   699  // bitmap in bits (which may not be a multiple of 8).
   700  //
   701  // This is used by bulkBarrierPreWrite for writes to data and BSS.
   702  //
   703  //go:nosplit
   704  func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
   705  	word := maskOffset / sys.PtrSize
   706  	bits = addb(bits, word/8)
   707  	mask := uint8(1) << (word % 8)
   708  
   709  	buf := &getg().m.p.ptr().wbBuf
   710  	for i := uintptr(0); i < size; i += sys.PtrSize {
   711  		if mask == 0 {
   712  			bits = addb(bits, 1)
   713  			if *bits == 0 {
   714  				// Skip 8 words.
   715  				i += 7 * sys.PtrSize
   716  				continue
   717  			}
   718  			mask = 1
   719  		}
   720  		if *bits&mask != 0 {
   721  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   722  			if src == 0 {
   723  				if !buf.putFast(*dstx, 0) {
   724  					wbBufFlush(nil, 0)
   725  				}
   726  			} else {
   727  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   728  				if !buf.putFast(*dstx, *srcx) {
   729  					wbBufFlush(nil, 0)
   730  				}
   731  			}
   732  		}
   733  		mask <<= 1
   734  	}
   735  }
   736  
   737  // typeBitsBulkBarrier executes a write barrier for every
   738  // pointer that would be copied from [src, src+size) to [dst,
   739  // dst+size) by a memmove using the type bitmap to locate those
   740  // pointer slots.
   741  //
   742  // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
   743  // dst, src, and size must be pointer-aligned.
   744  // The type typ must have a plain bitmap, not a GC program.
   745  // The only use of this function is in channel sends, and the
   746  // 64 kB channel element limit takes care of this for us.
   747  //
   748  // Must not be preempted because it typically runs right before memmove,
   749  // and the GC must observe them as an atomic action.
   750  //
   751  // Callers must perform cgo checks if writeBarrier.cgo.
   752  //
   753  //go:nosplit
   754  func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
   755  	if typ == nil {
   756  		throw("runtime: typeBitsBulkBarrier without type")
   757  	}
   758  	if typ.size != size {
   759  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
   760  		throw("runtime: invalid typeBitsBulkBarrier")
   761  	}
   762  	if typ.kind&kindGCProg != 0 {
   763  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
   764  		throw("runtime: invalid typeBitsBulkBarrier")
   765  	}
   766  	if !writeBarrier.needed {
   767  		return
   768  	}
   769  	ptrmask := typ.gcdata
   770  	buf := &getg().m.p.ptr().wbBuf
   771  	var bits uint32
   772  	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
   773  		if i&(sys.PtrSize*8-1) == 0 {
   774  			bits = uint32(*ptrmask)
   775  			ptrmask = addb(ptrmask, 1)
   776  		} else {
   777  			bits = bits >> 1
   778  		}
   779  		if bits&1 != 0 {
   780  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   781  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   782  			if !buf.putFast(*dstx, *srcx) {
   783  				wbBufFlush(nil, 0)
   784  			}
   785  		}
   786  	}
   787  }
   788  
   789  // The methods operating on spans all require that h has been returned
   790  // by heapBitsForSpan and that size, n, total are the span layout description
   791  // returned by the mspan's layout method.
   792  // If total > size*n, it means that there is extra leftover memory in the span,
   793  // usually due to rounding.
   794  //
   795  // TODO(rsc): Perhaps introduce a different heapBitsSpan type.
   796  
   797  // initSpan initializes the heap bitmap for a span.
   798  // It clears all checkmark bits.
   799  // If this is a span of pointer-sized objects, it initializes all
   800  // words to pointer/scan.
   801  // Otherwise, it initializes all words to scalar/dead.
   802  func (h heapBits) initSpan(s *mspan) {
   803  	// Clear bits corresponding to objects.
   804  	nw := (s.npages << _PageShift) / sys.PtrSize
   805  	if nw%wordsPerBitmapByte != 0 {
   806  		throw("initSpan: unaligned length")
   807  	}
   808  	if h.shift != 0 {
   809  		throw("initSpan: unaligned base")
   810  	}
   811  	isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
   812  	for nw > 0 {
   813  		hNext, anw := h.forwardOrBoundary(nw)
   814  		nbyte := anw / wordsPerBitmapByte
   815  		if isPtrs {
   816  			bitp := h.bitp
   817  			for i := uintptr(0); i < nbyte; i++ {
   818  				*bitp = bitPointerAll | bitScanAll
   819  				bitp = add1(bitp)
   820  			}
   821  		} else {
   822  			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
   823  		}
   824  		h = hNext
   825  		nw -= anw
   826  	}
   827  }
   828  
   829  // initCheckmarkSpan initializes a span for being checkmarked.
   830  // It clears the checkmark bits, which are set to 1 in normal operation.
   831  func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
    832  	// The sys.PtrSize == 8 check is a compile-time constant; on 32-bit it is false and eliminates this code entirely.
   833  	if sys.PtrSize == 8 && size == sys.PtrSize {
   834  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   835  		// Only possible on 64-bit system, since minimum size is 8.
   836  		// Must clear type bit (checkmark bit) of every word.
   837  		// The type bit is the lower of every two-bit pair.
   838  		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
   839  			*h.bitp &^= bitPointerAll
   840  			h = h.forward(wordsPerBitmapByte)
   841  		}
   842  		return
   843  	}
   844  	for i := uintptr(0); i < n; i++ {
   845  		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
   846  		h = h.forward(size / sys.PtrSize)
   847  	}
   848  }
   849  
   850  // clearCheckmarkSpan undoes all the checkmarking in a span.
   851  // The actual checkmark bits are ignored, so the only work to do
   852  // is to fix the pointer bits. (Pointer bits are ignored by scanobject
   853  // but consulted by typedmemmove.)
   854  func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
    855  	// The sys.PtrSize == 8 check is a compile-time constant; on 32-bit it is false and eliminates this code entirely.
   856  	if sys.PtrSize == 8 && size == sys.PtrSize {
   857  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   858  		// Only possible on 64-bit system, since minimum size is 8.
   859  		// Must clear type bit (checkmark bit) of every word.
   860  		// The type bit is the lower of every two-bit pair.
   861  		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
   862  			*h.bitp |= bitPointerAll
   863  			h = h.forward(wordsPerBitmapByte)
   864  		}
   865  	}
   866  }
   867  
   868  // oneBitCount is indexed by byte and produces the
   869  // number of 1 bits in that byte. For example 128 has 1 bit set
    870  // and oneBitCount[128] holds 1.
   871  var oneBitCount = [256]uint8{
   872  	0, 1, 1, 2, 1, 2, 2, 3,
   873  	1, 2, 2, 3, 2, 3, 3, 4,
   874  	1, 2, 2, 3, 2, 3, 3, 4,
   875  	2, 3, 3, 4, 3, 4, 4, 5,
   876  	1, 2, 2, 3, 2, 3, 3, 4,
   877  	2, 3, 3, 4, 3, 4, 4, 5,
   878  	2, 3, 3, 4, 3, 4, 4, 5,
   879  	3, 4, 4, 5, 4, 5, 5, 6,
   880  	1, 2, 2, 3, 2, 3, 3, 4,
   881  	2, 3, 3, 4, 3, 4, 4, 5,
   882  	2, 3, 3, 4, 3, 4, 4, 5,
   883  	3, 4, 4, 5, 4, 5, 5, 6,
   884  	2, 3, 3, 4, 3, 4, 4, 5,
   885  	3, 4, 4, 5, 4, 5, 5, 6,
   886  	3, 4, 4, 5, 4, 5, 5, 6,
   887  	4, 5, 5, 6, 5, 6, 6, 7,
   888  	1, 2, 2, 3, 2, 3, 3, 4,
   889  	2, 3, 3, 4, 3, 4, 4, 5,
   890  	2, 3, 3, 4, 3, 4, 4, 5,
   891  	3, 4, 4, 5, 4, 5, 5, 6,
   892  	2, 3, 3, 4, 3, 4, 4, 5,
   893  	3, 4, 4, 5, 4, 5, 5, 6,
   894  	3, 4, 4, 5, 4, 5, 5, 6,
   895  	4, 5, 5, 6, 5, 6, 6, 7,
   896  	2, 3, 3, 4, 3, 4, 4, 5,
   897  	3, 4, 4, 5, 4, 5, 5, 6,
   898  	3, 4, 4, 5, 4, 5, 5, 6,
   899  	4, 5, 5, 6, 5, 6, 6, 7,
   900  	3, 4, 4, 5, 4, 5, 5, 6,
   901  	4, 5, 5, 6, 5, 6, 6, 7,
   902  	4, 5, 5, 6, 5, 6, 6, 7,
   903  	5, 6, 6, 7, 6, 7, 7, 8}
   904  
   905  // countAlloc returns the number of objects allocated in span s by
   906  // scanning the allocation bitmap.
   907  // TODO:(rlh) Use popcount intrinsic.
   908  func (s *mspan) countAlloc() int {
   909  	count := 0
   910  	maxIndex := s.nelems / 8
   911  	for i := uintptr(0); i < maxIndex; i++ {
   912  		mrkBits := *s.gcmarkBits.bytep(i)
   913  		count += int(oneBitCount[mrkBits])
   914  	}
   915  	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
   916  		mrkBits := *s.gcmarkBits.bytep(maxIndex)
   917  		mask := uint8((1 << bitsInLastByte) - 1)
   918  		bits := mrkBits & mask
   919  		count += int(oneBitCount[bits])
   920  	}
   921  	return count
   922  }
   923  
   924  // heapBitsSetType records that the new allocation [x, x+size)
   925  // holds in [x, x+dataSize) one or more values of type typ.
   926  // (The number of values is given by dataSize / typ.size.)
   927  // If dataSize < size, the fragment [x+dataSize, x+size) is
   928  // recorded as non-pointer data.
   929  // It is known that the type has pointers somewhere;
   930  // malloc does not call heapBitsSetType when there are no pointers,
   931  // because all free objects are marked as noscan during
   932  // heapBitsSweepSpan.
   933  //
   934  // There can only be one allocation from a given span active at a time,
   935  // and the bitmap for a span always falls on byte boundaries,
   936  // so there are no write-write races for access to the heap bitmap.
   937  // Hence, heapBitsSetType can access the bitmap without atomics.
   938  //
   939  // There can be read-write races between heapBitsSetType and things
   940  // that read the heap bitmap like scanobject. However, since
   941  // heapBitsSetType is only used for objects that have not yet been
   942  // made reachable, readers will ignore bits being modified by this
   943  // function. This does mean this function cannot transiently modify
   944  // bits that belong to neighboring objects. Also, on weakly-ordered
   945  // machines, callers must execute a store/store (publication) barrier
   946  // between calling this function and making the object reachable.
   947  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   948  	const doubleCheck = false // slow but helpful; enable to test modifications to this code
   949  
   950  	// dataSize is always size rounded up to the next malloc size class,
   951  	// except in the case of allocating a defer block, in which case
   952  	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
   953  	// arbitrarily larger.
   954  	//
   955  	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
   956  	// assume that dataSize == size without checking it explicitly.
   957  
   958  	if sys.PtrSize == 8 && size == sys.PtrSize {
    959  		// It's one word and it has pointers, so it must be a pointer.
   960  		// Since all allocated one-word objects are pointers
   961  		// (non-pointers are aggregated into tinySize allocations),
   962  		// initSpan sets the pointer bits for us. Nothing to do here.
   963  		if doubleCheck {
   964  			h := heapBitsForAddr(x)
   965  			if !h.isPointer() {
   966  				throw("heapBitsSetType: pointer bit missing")
   967  			}
   968  			if !h.morePointers() {
   969  				throw("heapBitsSetType: scan bit missing")
   970  			}
   971  		}
   972  		return
   973  	}
   974  
   975  	h := heapBitsForAddr(x)
   976  	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
   977  
    978  	// Heap bitmap bits for a 2-word object are only 4 bits,
    979  	// so the byte is also shared with the objects next to it.
   980  	// This is called out as a special case primarily for 32-bit systems,
   981  	// so that on 32-bit systems the code below can assume all objects
   982  	// are 4-word aligned (because they're all 16-byte aligned).
   983  	if size == 2*sys.PtrSize {
   984  		if typ.size == sys.PtrSize {
   985  			// We're allocating a block big enough to hold two pointers.
   986  			// On 64-bit, that means the actual object must be two pointers,
   987  			// or else we'd have used the one-pointer-sized block.
   988  			// On 32-bit, however, this is the 8-byte block, the smallest one.
   989  			// So it could be that we're allocating one pointer and this was
   990  			// just the smallest block available. Distinguish by checking dataSize.
   991  			// (In general the number of instances of typ being allocated is
   992  			// dataSize/typ.size.)
   993  			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
   994  				// 1 pointer object. On 32-bit machines clear the bit for the
   995  				// unused second word.
   996  				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
   997  				*h.bitp |= (bitPointer | bitScan) << h.shift
   998  			} else {
   999  				// 2-element slice of pointer.
  1000  				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
  1001  			}
  1002  			return
  1003  		}
  1004  		// Otherwise typ.size must be 2*sys.PtrSize,
  1005  		// and typ.kind&kindGCProg == 0.
  1006  		if doubleCheck {
  1007  			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
  1008  				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
  1009  				throw("heapBitsSetType")
  1010  			}
  1011  		}
  1012  		b := uint32(*ptrmask)
  1013  		hb := (b & 3) | bitScan
  1014  		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
   1015  		// 110011 is shifted left by h.shift and complemented.
   1016  		// This clears out the bits that are about to be
   1017  		// ored into *h.bitp in the next instructions.
  1018  		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
  1019  		*h.bitp |= uint8(hb << h.shift)
  1020  		return
  1021  	}
  1022  
  1023  	// Copy from 1-bit ptrmask into 2-bit bitmap.
  1024  	// The basic approach is to use a single uintptr as a bit buffer,
  1025  	// alternating between reloading the buffer and writing bitmap bytes.
  1026  	// In general, one load can supply two bitmap byte writes.
  1027  	// This is a lot of lines of code, but it compiles into relatively few
  1028  	// machine instructions.
  1029  
  1030  	outOfPlace := false
  1031  	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
  1032  		// This object spans heap arenas, so the bitmap may be
  1033  		// discontiguous. Unroll it into the object instead
  1034  		// and then copy it out.
  1035  		//
  1036  		// In doubleCheck mode, we randomly do this anyway to
  1037  		// stress test the bitmap copying path.
  1038  		outOfPlace = true
  1039  		h.bitp = (*uint8)(unsafe.Pointer(x))
  1040  		h.last = nil
  1041  	}
  1042  
  1043  	var (
  1044  		// Ptrmask input.
  1045  		p     *byte   // last ptrmask byte read
  1046  		b     uintptr // ptrmask bits already loaded
  1047  		nb    uintptr // number of bits in b at next read
  1048  		endp  *byte   // final ptrmask byte to read (then repeat)
  1049  		endnb uintptr // number of valid bits in *endp
  1050  		pbits uintptr // alternate source of bits
  1051  
  1052  		// Heap bitmap output.
  1053  		w     uintptr // words processed
  1054  		nw    uintptr // number of words to process
  1055  		hbitp *byte   // next heap bitmap byte to write
  1056  		hb    uintptr // bits being prepared for *hbitp
  1057  	)
  1058  
  1059  	hbitp = h.bitp
  1060  
  1061  	// Handle GC program. Delayed until this part of the code
  1062  	// so that we can use the same double-checking mechanism
  1063  	// as the 1-bit case. Nothing above could have encountered
  1064  	// GC programs: the cases were all too small.
  1065  	if typ.kind&kindGCProg != 0 {
  1066  		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
  1067  		if doubleCheck {
  1068  			// Double-check the heap bits written by GC program
  1069  			// by running the GC program to create a 1-bit pointer mask
  1070  			// and then jumping to the double-check code below.
  1071  			// This doesn't catch bugs shared between the 1-bit and 4-bit
  1072  			// GC program execution, but it does catch mistakes specific
  1073  			// to just one of those and bugs in heapBitsSetTypeGCProg's
  1074  			// implementation of arrays.
  1075  			lock(&debugPtrmask.lock)
  1076  			if debugPtrmask.data == nil {
  1077  				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
  1078  			}
  1079  			ptrmask = debugPtrmask.data
  1080  			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
  1081  		}
  1082  		goto Phase4
  1083  	}
  1084  
  1085  	// Note about sizes:
  1086  	//
   1087  	// typ.size is the number of bytes in the object,
   1088  	// and typ.ptrdata is the number of bytes in the prefix
   1089  	// of the object that contains pointers. That is, the final
   1090  	// typ.size - typ.ptrdata bytes contain no pointers.
  1091  	// This allows optimization of a common pattern where
  1092  	// an object has a small header followed by a large scalar
  1093  	// buffer. If we know the pointers are over, we don't have
  1094  	// to scan the buffer's heap bitmap at all.
  1095  	// The 1-bit ptrmasks are sized to contain only bits for
  1096  	// the typ.ptrdata prefix, zero padded out to a full byte
  1097  	// of bitmap. This code sets nw (below) so that heap bitmap
  1098  	// bits are only written for the typ.ptrdata prefix; if there is
  1099  	// more room in the allocated object, the next heap bitmap
  1100  	// entry is a 00, indicating that there are no more pointers
  1101  	// to scan. So only the ptrmask for the ptrdata bytes is needed.
  1102  	//
  1103  	// Replicated copies are not as nice: if there is an array of
   1104  	// objects with scalar tails, all but the last tail do have to
  1105  	// be initialized, because there is no way to say "skip forward".
  1106  	// However, because of the possibility of a repeated type with
  1107  	// size not a multiple of 4 pointers (one heap bitmap byte),
  1108  	// the code already must handle the last ptrmask byte specially
  1109  	// by treating it as containing only the bits for endnb pointers,
  1110  	// where endnb <= 4. We represent large scalar tails that must
  1111  	// be expanded in the replication by setting endnb larger than 4.
  1112  	// This will have the effect of reading many bits out of b,
  1113  	// but once the real bits are shifted out, b will supply as many
  1114  	// zero bits as we try to read, which is exactly what we need.
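	//
	// Illustrative example: for an array whose element type is three words
	// with a two-word pointer prefix (pointer, pointer, scalar), the
	// per-element bit pattern is 0b011 (low bit first). Because Phase 2
	// consumes bits four at a time, that 3-bit pattern is replicated across
	// pbits (...011011011) so the loop can keep shifting without re-reading
	// and re-aligning the ptrmask byte for every element.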
  1115  
  1116  	p = ptrmask
  1117  	if typ.size < dataSize {
  1118  		// Filling in bits for an array of typ.
  1119  		// Set up for repetition of ptrmask during main loop.
   1120  		// Note that ptrmask describes only a prefix of the type (the typ.ptrdata words).
  1121  		const maxBits = sys.PtrSize*8 - 7
  1122  		if typ.ptrdata/sys.PtrSize <= maxBits {
  1123  			// Entire ptrmask fits in uintptr with room for a byte fragment.
  1124  			// Load into pbits and never read from ptrmask again.
  1125  			// This is especially important when the ptrmask has
  1126  			// fewer than 8 bits in it; otherwise the reload in the middle
  1127  			// of the Phase 2 loop would itself need to loop to gather
  1128  			// at least 8 bits.
  1129  
  1130  			// Accumulate ptrmask into b.
  1131  			// ptrmask is sized to describe only typ.ptrdata, but we record
  1132  			// it as describing typ.size bytes, since all the high bits are zero.
  1133  			nb = typ.ptrdata / sys.PtrSize
  1134  			for i := uintptr(0); i < nb; i += 8 {
  1135  				b |= uintptr(*p) << i
  1136  				p = add1(p)
  1137  			}
  1138  			nb = typ.size / sys.PtrSize
  1139  
  1140  			// Replicate ptrmask to fill entire pbits uintptr.
  1141  			// Doubling and truncating is fewer steps than
  1142  			// iterating by nb each time. (nb could be 1.)
  1143  			// Since we loaded typ.ptrdata/sys.PtrSize bits
  1144  			// but are pretending to have typ.size/sys.PtrSize,
  1145  			// there might be no replication necessary/possible.
  1146  			pbits = b
  1147  			endnb = nb
  1148  			if nb+nb <= maxBits {
  1149  				for endnb <= sys.PtrSize*8 {
  1150  					pbits |= pbits << endnb
  1151  					endnb += endnb
  1152  				}
  1153  				// Truncate to a multiple of original ptrmask.
  1154  				// Because nb+nb <= maxBits, nb fits in a byte.
  1155  				// Byte division is cheaper than uintptr division.
  1156  				endnb = uintptr(maxBits/byte(nb)) * nb
  1157  				pbits &= 1<<endnb - 1
  1158  				b = pbits
  1159  				nb = endnb
  1160  			}
  1161  
  1162  			// Clear p and endp as sentinel for using pbits.
  1163  			// Checked during Phase 2 loop.
  1164  			p = nil
  1165  			endp = nil
  1166  		} else {
  1167  			// Ptrmask is larger. Read it multiple times.
  1168  			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
  1169  			endp = addb(ptrmask, n)
  1170  			endnb = typ.size/sys.PtrSize - n*8
  1171  		}
  1172  	}
  1173  	if p != nil {
  1174  		b = uintptr(*p)
  1175  		p = add1(p)
  1176  		nb = 8
  1177  	}
  1178  
  1179  	if typ.size == dataSize {
  1180  		// Single entry: can stop once we reach the non-pointer data.
  1181  		nw = typ.ptrdata / sys.PtrSize
  1182  	} else {
  1183  		// Repeated instances of typ in an array.
  1184  		// Have to process first N-1 entries in full, but can stop
  1185  		// once we reach the non-pointer data in the final entry.
  1186  		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1187  	}
  1188  	if nw == 0 {
  1189  		// No pointers! Caller was supposed to check.
  1190  		println("runtime: invalid type ", typ.string())
  1191  		throw("heapBitsSetType: called with non-pointer type")
  1192  		return
  1193  	}
  1194  	if nw < 2 {
  1195  		// Must write at least 2 words, because the "no scan"
  1196  		// encoding doesn't take effect until the third word.
  1197  		nw = 2
  1198  	}
  1199  
  1200  	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
  1201  	// The leading byte is special because it contains the bits for word 1,
  1202  	// which does not have the scan bit set.
  1203  	// The leading half-byte is special because it's a half a byte,
  1204  	// so we have to be careful with the bits already there.
  1205  	switch {
  1206  	default:
  1207  		throw("heapBitsSetType: unexpected shift")
  1208  
  1209  	case h.shift == 0:
  1210  		// Ptrmask and heap bitmap are aligned.
  1211  		// Handle first byte of bitmap specially.
  1212  		//
  1213  		// The first byte we write out covers the first four
  1214  		// words of the object. The scan/dead bit on the first
  1215  		// word must be set to scan since there are pointers
  1216  		// somewhere in the object. The scan/dead bit on the
  1217  		// second word is the checkmark, so we don't set it.
  1218  		// In all following words, we set the scan/dead
   1219  		// appropriately to indicate that the object continues
  1220  		// to the next 2-bit entry in the bitmap.
  1221  		//
  1222  		// TODO: It doesn't matter if we set the checkmark, so
  1223  		// maybe this case isn't needed any more.
  1224  		hb = b & bitPointerAll
  1225  		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
  1226  		if w += 4; w >= nw {
  1227  			goto Phase3
  1228  		}
  1229  		*hbitp = uint8(hb)
  1230  		hbitp = add1(hbitp)
  1231  		b >>= 4
  1232  		nb -= 4
  1233  
  1234  	case sys.PtrSize == 8 && h.shift == 2:
  1235  		// Ptrmask and heap bitmap are misaligned.
  1236  		// The bits for the first two words are in a byte shared
  1237  		// with another object, so we must be careful with the bits
  1238  		// already there.
  1239  		// We took care of 1-word and 2-word objects above,
  1240  		// so this is at least a 6-word object.
  1241  		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
  1242  		// This is not noscan, so set the scan bit in the
  1243  		// first word.
  1244  		hb |= bitScan << (2 * heapBitsShift)
  1245  		b >>= 2
  1246  		nb -= 2
  1247  		// Note: no bitScan for second word because that's
  1248  		// the checkmark.
  1249  		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
  1250  		*hbitp |= uint8(hb)
  1251  		hbitp = add1(hbitp)
  1252  		if w += 2; w >= nw {
  1253  			// We know that there is more data, because we handled 2-word objects above.
  1254  			// This must be at least a 6-word object. If we're out of pointer words,
  1255  			// mark no scan in next bitmap byte and finish.
  1256  			hb = 0
  1257  			w += 4
  1258  			goto Phase3
  1259  		}
  1260  	}
  1261  
  1262  	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
  1263  	// The loop computes the bits for that last write but does not execute the write;
  1264  	// it leaves the bits in hb for processing by phase 3.
  1265  	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
  1266  	// use in the first half of the loop right now, and then we only adjust nb explicitly
  1267  	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
  1268  	nb -= 4
  1269  	for {
  1270  		// Emit bitmap byte.
  1271  		// b has at least nb+4 bits, with one exception:
  1272  		// if w+4 >= nw, then b has only nw-w bits,
  1273  		// but we'll stop at the break and then truncate
  1274  		// appropriately in Phase 3.
  1275  		hb = b & bitPointerAll
  1276  		hb |= bitScanAll
  1277  		if w += 4; w >= nw {
  1278  			break
  1279  		}
  1280  		*hbitp = uint8(hb)
  1281  		hbitp = add1(hbitp)
  1282  		b >>= 4
  1283  
  1284  		// Load more bits. b has nb right now.
  1285  		if p != endp {
  1286  			// Fast path: keep reading from ptrmask.
  1287  			// nb unmodified: we just loaded 8 bits,
  1288  			// and the next iteration will consume 8 bits,
  1289  			// leaving us with the same nb the next time we're here.
  1290  			if nb < 8 {
  1291  				b |= uintptr(*p) << nb
  1292  				p = add1(p)
  1293  			} else {
  1294  				// Reduce the number of bits in b.
  1295  				// This is important if we skipped
  1296  				// over a scalar tail, since nb could
  1297  				// be larger than the bit width of b.
  1298  				nb -= 8
  1299  			}
  1300  		} else if p == nil {
  1301  			// Almost as fast path: track bit count and refill from pbits.
  1302  			// For short repetitions.
  1303  			if nb < 8 {
  1304  				b |= pbits << nb
  1305  				nb += endnb
  1306  			}
  1307  			nb -= 8 // for next iteration
  1308  		} else {
  1309  			// Slow path: reached end of ptrmask.
  1310  			// Process final partial byte and rewind to start.
  1311  			b |= uintptr(*p) << nb
  1312  			nb += endnb
  1313  			if nb < 8 {
  1314  				b |= uintptr(*ptrmask) << nb
  1315  				p = add1(ptrmask)
  1316  			} else {
  1317  				nb -= 8
  1318  				p = ptrmask
  1319  			}
  1320  		}
  1321  
  1322  		// Emit bitmap byte.
  1323  		hb = b & bitPointerAll
  1324  		hb |= bitScanAll
  1325  		if w += 4; w >= nw {
  1326  			break
  1327  		}
  1328  		*hbitp = uint8(hb)
  1329  		hbitp = add1(hbitp)
  1330  		b >>= 4
  1331  	}
  1332  
  1333  Phase3:
  1334  	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
  1335  	if w > nw {
  1336  		// Counting the 4 entries in hb not yet written to memory,
  1337  		// there are more entries than possible pointer slots.
  1338  		// Discard the excess entries (can't be more than 3).
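        		// For example, if w-nw == 2, mask is 1<<2 - 1 = 0b0011, so
        		// mask | mask<<4 keeps the two low pointer bits and the two
        		// corresponding scan bits and clears the rest of the byte.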
  1339  		mask := uintptr(1)<<(4-(w-nw)) - 1
  1340  		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
  1341  	}
  1342  
  1343  	// Change nw from counting possibly-pointer words to total words in allocation.
  1344  	nw = size / sys.PtrSize
  1345  
  1346  	// Write whole bitmap bytes.
  1347  	// The first is hb, the rest are zero.
  1348  	if w <= nw {
  1349  		*hbitp = uint8(hb)
  1350  		hbitp = add1(hbitp)
  1351  		hb = 0 // for possible final half-byte below
  1352  		for w += 4; w <= nw; w += 4 {
  1353  			*hbitp = 0
  1354  			hbitp = add1(hbitp)
  1355  		}
  1356  	}
  1357  
  1358  	// Write final partial bitmap byte if any.
  1359  	// We know w > nw, or else we'd still be in the loop above.
  1360  	// It can be bigger only due to the 4 entries in hb that it counts.
  1361  	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
  1362  	// and can discard the 4 sitting in hb.
  1363  	// But if w == nw+2, we need to write the first two entries in hb.
  1364  	// The byte is shared with the next object, so be careful with
  1365  	// existing bits.
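        	// (The mask below clears only the two low entries of the byte, both
        	// their pointer bits and their scan bits; the two high entries, which
        	// describe the neighboring object, are preserved.)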
  1366  	if w == nw+2 {
  1367  		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
  1368  	}
  1369  
  1370  Phase4:
  1371  	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
  1372  	if outOfPlace {
  1373  		// TODO: We could probably make this faster by
  1374  		// handling [x+dataSize, x+size) specially.
  1375  		h := heapBitsForAddr(x)
  1376  		// cnw is the number of heap words, or bit pairs
  1377  		// remaining (like nw above).
  1378  		cnw := size / sys.PtrSize
  1379  		src := (*uint8)(unsafe.Pointer(x))
  1380  		// We know the first and last byte of the bitmap are
  1381  		// not the same, but it's still possible for small
  1382  		// objects to span arenas, so they may share bitmap bytes
  1383  		// with neighboring objects.
  1384  		//
  1385  		// Handle the first byte specially if it's shared. See
  1386  		// Phase 1 for why this is the only special case we need.
  1387  		if doubleCheck {
  1388  			if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
  1389  				print("x=", x, " size=", size, " cnw=", h.shift, "\n")
  1390  				throw("bad start shift")
  1391  			}
  1392  		}
  1393  		if sys.PtrSize == 8 && h.shift == 2 {
  1394  			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
  1395  			h = h.next().next()
  1396  			cnw -= 2
  1397  			src = addb(src, 1)
  1398  		}
  1399  		// We're now byte aligned. Copy out to per-arena
  1400  		// bitmaps until the last byte (which may again be
  1401  		// partial).
  1402  		for cnw >= 4 {
  1403  			// This loop processes four words at a time,
  1404  			// so round cnw down accordingly.
  1405  			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)
  1406  
  1407  			// n is the number of bitmap bytes to copy.
  1408  			n := words / 4
  1409  			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
  1410  			cnw -= words
  1411  			h = hNext
  1412  			src = addb(src, n)
  1413  		}
  1414  		if doubleCheck && h.shift != 0 {
  1415  			print("cnw=", cnw, " h.shift=", h.shift, "\n")
  1416  			throw("bad shift after block copy")
  1417  		}
  1418  		// Handle the last byte if it's shared.
  1419  		if cnw == 2 {
  1420  			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
  1421  			src = addb(src, 1)
  1422  			h = h.next().next()
  1423  		}
  1424  		if doubleCheck {
  1425  			if uintptr(unsafe.Pointer(src)) > x+size {
  1426  				throw("copy exceeded object size")
  1427  			}
  1428  			if !(cnw == 0 || cnw == 2) {
  1429  				print("x=", x, " size=", size, " cnw=", cnw, "\n")
  1430  				throw("bad number of remaining words")
  1431  			}
  1432  			// Set up hbitp so doubleCheck code below can check it.
  1433  			hbitp = h.bitp
  1434  		}
  1435  		// Zero the object where we wrote the bitmap.
  1436  		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
  1437  	}
  1438  
  1439  	// Double check the whole bitmap.
  1440  	if doubleCheck {
  1441  		// x+size may not point to the heap, so back up one
  1442  		// word and then call next().
  1443  		end := heapBitsForAddr(x + size - sys.PtrSize).next()
  1444  		endAI := arenaIdx(end.arena)
  1445  		if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
  1446  			// The unrolling code above walks hbitp just
  1447  			// past the bitmap without moving to the next
  1448  			// arena. Synthesize this for end.bitp.
  1449  			end.arena--
  1450  			endAI = arenaIdx(end.arena)
  1451  			end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
  1452  			end.last = nil
  1453  		}
  1454  		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
  1455  			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
  1456  			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1457  			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1458  			h0 := heapBitsForAddr(x)
  1459  			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1460  			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
  1461  			throw("bad heapBitsSetType")
  1462  		}
  1463  
  1464  		// Double-check that bits to be written were written correctly.
  1465  		// Does not check that other bits were not written, unfortunately.
  1466  		h := heapBitsForAddr(x)
  1467  		nptr := typ.ptrdata / sys.PtrSize
  1468  		ndata := typ.size / sys.PtrSize
  1469  		count := dataSize / typ.size
  1470  		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1471  		for i := uintptr(0); i < size/sys.PtrSize; i++ {
  1472  			j := i % ndata
  1473  			var have, want uint8
  1474  			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
  1475  			if i >= totalptr {
  1476  				want = 0 // deadmarker
  1477  				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
  1478  					want = bitScan
  1479  				}
  1480  			} else {
  1481  				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
  1482  					want |= bitPointer
  1483  				}
  1484  				if i != 1 {
  1485  					want |= bitScan
  1486  				} else {
  1487  					have &^= bitScan
  1488  				}
  1489  			}
  1490  			if have != want {
  1491  				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
  1492  				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1493  				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
  1494  				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1495  				h0 := heapBitsForAddr(x)
  1496  				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1497  				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
  1498  				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
  1499  				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
  1500  				if typ.kind&kindGCProg != 0 {
  1501  					println("GC program:")
  1502  					dumpGCProg(addb(typ.gcdata, 4))
  1503  				}
  1504  				throw("bad heapBitsSetType")
  1505  			}
  1506  			h = h.next()
  1507  		}
  1508  		if ptrmask == debugPtrmask.data {
  1509  			unlock(&debugPtrmask.lock)
  1510  		}
  1511  	}
  1512  }
  1513  
  1514  var debugPtrmask struct {
  1515  	lock mutex
  1516  	data *byte
  1517  }
  1518  
  1519  // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
  1520  // progSize is the size of the memory described by the program.
  1521  // elemSize is the size of the element whose prefix the GC program describes.
  1522  // dataSize is the total size of the intended data, a multiple of elemSize.
  1523  // allocSize is the total size of the allocated memory.
  1524  //
  1525  // GC programs are only used for large allocations.
  1526  // heapBitsSetType requires that allocSize is a multiple of 4 words,
  1527  // so that the relevant bitmap bytes are not shared with surrounding
  1528  // objects.
  1529  func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
  1530  	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
  1531  		// Alignment will be wrong.
  1532  		throw("heapBitsSetTypeGCProg: small allocation")
  1533  	}
  1534  	var totalBits uintptr
  1535  	if elemSize == dataSize {
  1536  		totalBits = runGCProg(prog, nil, h.bitp, 2)
  1537  		if totalBits*sys.PtrSize != progSize {
  1538  			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
  1539  			throw("heapBitsSetTypeGCProg: unexpected bit count")
  1540  		}
  1541  	} else {
  1542  		count := dataSize / elemSize
  1543  
  1544  		// Piece together program trailer to run after prog that does:
  1545  		//	literal(0)
  1546  		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
  1547  		//	repeat(elemSize, count-1) // repeat that element for count
  1548  		// This zero-pads the data remaining in the first element and then
  1549  		// repeats that first element to fill the array.
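        		// For example (illustrative numbers only): with 8-byte words,
        		// elemSize = 64 (8 words), progSize = 24 (3 words), and count = 10,
        		// the trailer bytes are 01 00 (literal(0)), 81 04 (repeat(1, 4)),
        		// 80 08 09 (repeat(8, 9)), 00 (stop).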
  1550  		var trailer [40]byte // 3 varints (max 10 each) + some bytes
  1551  		i := 0
  1552  		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
  1553  			// literal(0)
  1554  			trailer[i] = 0x01
  1555  			i++
  1556  			trailer[i] = 0
  1557  			i++
  1558  			if n > 1 {
  1559  				// repeat(1, n-1)
  1560  				trailer[i] = 0x81
  1561  				i++
  1562  				n--
  1563  				for ; n >= 0x80; n >>= 7 {
  1564  					trailer[i] = byte(n | 0x80)
  1565  					i++
  1566  				}
  1567  				trailer[i] = byte(n)
  1568  				i++
  1569  			}
  1570  		}
  1571  		// repeat(elemSize/ptrSize, count-1)
  1572  		trailer[i] = 0x80
  1573  		i++
  1574  		n := elemSize / sys.PtrSize
  1575  		for ; n >= 0x80; n >>= 7 {
  1576  			trailer[i] = byte(n | 0x80)
  1577  			i++
  1578  		}
  1579  		trailer[i] = byte(n)
  1580  		i++
  1581  		n = count - 1
  1582  		for ; n >= 0x80; n >>= 7 {
  1583  			trailer[i] = byte(n | 0x80)
  1584  			i++
  1585  		}
  1586  		trailer[i] = byte(n)
  1587  		i++
  1588  		trailer[i] = 0
  1589  		i++
  1590  
  1591  		runGCProg(prog, &trailer[0], h.bitp, 2)
  1592  
  1593  		// Even though we filled in the full array just now,
  1594  		// record that we only filled in up to the ptrdata of the
  1595  		// last element. This will cause the code below to
  1596  		// memclr the dead section of the final array element,
  1597  		// so that scanobject can stop early in the final element.
  1598  		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
  1599  	}
  1600  	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
  1601  	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
  1602  	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
  1603  }
  1604  
  1605  // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
  1606  // size is the size, in bytes, of the region described by prog.
  1607  // The resulting bitvector will have no more than size/sys.PtrSize bits.
  1608  func progToPointerMask(prog *byte, size uintptr) bitvector {
  1609  	n := (size/sys.PtrSize + 7) / 8
  1610  	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
  1611  	x[len(x)-1] = 0xa1 // overflow check sentinel
  1612  	n = runGCProg(prog, nil, &x[0], 1)
  1613  	if x[len(x)-1] != 0xa1 {
  1614  		throw("progToPointerMask: overflow")
  1615  	}
  1616  	return bitvector{int32(n), &x[0]}
  1617  }
  1618  
  1619  // Packed GC pointer bitmaps, aka GC programs.
  1620  //
  1621  // For large types containing arrays, the type information has a
  1622  // natural repetition that can be encoded to save space in the
  1623  // binary and in the memory representation of the type information.
  1624  //
  1625  // The encoding is a simple Lempel-Ziv style bytecode machine
  1626  // with the following instructions:
  1627  //
  1628  //	00000000: stop
  1629  //	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
  1630  //	10000000 n c: repeat the previous n bits c times; n, c are varints
  1631  //	1nnnnnnn c: repeat the previous n bits c times; c is a varint
  1632  
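        // To make the encoding concrete, the following is a small, self-contained
        // sketch of an interpreter for it. It is illustrative only (runGCProg below
        // is the interpreter the runtime actually uses) and it assumes a well-formed
        // program. It expands a program into a 1-bit mask with one byte per entry,
        // the same one-byte-per-entry layout reflect_gcbits returns for testing.
        // expandGCProgSketch is a hypothetical helper, not part of the runtime.
        func expandGCProgSketch(prog []byte) []byte {
        	// readVarint decodes a little-endian base-128 varint starting at i
        	// and returns the value and the index just past it.
        	readVarint := func(i int) (int, int) {
        		v, shift := 0, uint(0)
        		for {
        			x := prog[i]
        			i++
        			v |= int(x&0x7f) << shift
        			if x&0x80 == 0 {
        				return v, i
        			}
        			shift += 7
        		}
        	}
        	var out []byte
        	for i := 0; i < len(prog); {
        		inst := prog[i]
        		i++
        		n := int(inst & 0x7f)
        		if inst&0x80 == 0 {
        			if n == 0 {
        				break // 00000000: stop
        			}
        			// 0nnnnnnn: emit n literal bits from the next (n+7)/8 bytes,
        			// consuming each byte from its low bit up.
        			for j := 0; j < n; j++ {
        				out = append(out, prog[i+j/8]>>uint(j%8)&1)
        			}
        			i += (n + 7) / 8
        			continue
        		}
        		if n == 0 {
        			// 10000000 n c: n is given as a varint.
        			n, i = readVarint(i)
        		}
        		// Repeat the previous n bits c times; c is a varint.
        		var c int
        		c, i = readVarint(i)
        		pattern := append([]byte(nil), out[len(out)-n:]...)
        		for ; c > 0; c-- {
        			out = append(out, pattern...)
        		}
        	}
        	return out
        }
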
  1633  // runGCProg executes the GC program prog, and then trailer if non-nil,
  1634  // writing to dst with entries of the given size.
  1635  // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
  1636  // If size == 2, dst is the 2-bit heap bitmap, and writes move forward
  1637  // from dst, filling in both the pointer bit and the scan bit for each entry.
  1638  // In this case, the caller guarantees that only whole bytes in dst need to be written.
  1639  //
  1640  // runGCProg returns the number of 1- or 2-bit entries written to memory.
  1641  func runGCProg(prog, trailer, dst *byte, size int) uintptr {
  1642  	dstStart := dst
  1643  
  1644  	// Bits waiting to be written to memory.
  1645  	var bits uintptr
  1646  	var nbits uintptr
  1647  
  1648  	p := prog
  1649  Run:
  1650  	for {
  1651  		// Flush accumulated full bytes.
  1652  		// The rest of the loop assumes that nbits <= 7.
  1653  		for ; nbits >= 8; nbits -= 8 {
  1654  			if size == 1 {
  1655  				*dst = uint8(bits)
  1656  				dst = add1(dst)
  1657  				bits >>= 8
  1658  			} else {
  1659  				v := bits&bitPointerAll | bitScanAll
  1660  				*dst = uint8(v)
  1661  				dst = add1(dst)
  1662  				bits >>= 4
  1663  				v = bits&bitPointerAll | bitScanAll
  1664  				*dst = uint8(v)
  1665  				dst = add1(dst)
  1666  				bits >>= 4
  1667  			}
  1668  		}
  1669  
  1670  		// Process one instruction.
  1671  		inst := uintptr(*p)
  1672  		p = add1(p)
  1673  		n := inst & 0x7F
  1674  		if inst&0x80 == 0 {
  1675  			// Literal bits; n == 0 means end of program.
  1676  			if n == 0 {
  1677  				// Program is over; continue in trailer if present.
  1678  				if trailer != nil {
  1679  					p = trailer
  1680  					trailer = nil
  1681  					continue
  1682  				}
  1683  				break Run
  1684  			}
  1685  			nbyte := n / 8
  1686  			for i := uintptr(0); i < nbyte; i++ {
  1687  				bits |= uintptr(*p) << nbits
  1688  				p = add1(p)
  1689  				if size == 1 {
  1690  					*dst = uint8(bits)
  1691  					dst = add1(dst)
  1692  					bits >>= 8
  1693  				} else {
  1694  					v := bits&0xf | bitScanAll
  1695  					*dst = uint8(v)
  1696  					dst = add1(dst)
  1697  					bits >>= 4
  1698  					v = bits&0xf | bitScanAll
  1699  					*dst = uint8(v)
  1700  					dst = add1(dst)
  1701  					bits >>= 4
  1702  				}
  1703  			}
  1704  			if n %= 8; n > 0 {
  1705  				bits |= uintptr(*p) << nbits
  1706  				p = add1(p)
  1707  				nbits += n
  1708  			}
  1709  			continue Run
  1710  		}
  1711  
  1712  		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
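        		// (Varints here are little-endian base-128: for example, the bytes
        		// 0x85 0x02 decode to 5 + 2<<7 = 261.)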
  1713  		if n == 0 {
  1714  			for off := uint(0); ; off += 7 {
  1715  				x := uintptr(*p)
  1716  				p = add1(p)
  1717  				n |= (x & 0x7F) << off
  1718  				if x&0x80 == 0 {
  1719  					break
  1720  				}
  1721  			}
  1722  		}
  1723  
  1724  		// Count is encoded in a varint in the next bytes.
  1725  		c := uintptr(0)
  1726  		for off := uint(0); ; off += 7 {
  1727  			x := uintptr(*p)
  1728  			p = add1(p)
  1729  			c |= (x & 0x7F) << off
  1730  			if x&0x80 == 0 {
  1731  				break
  1732  			}
  1733  		}
  1734  		c *= n // now total number of bits to copy
  1735  
  1736  		// If the number of bits being repeated is small, load them
  1737  		// into a register and use that register for the entire loop
  1738  		// instead of repeatedly reading from memory.
  1739  		// Handling fewer than 8 bits here makes the general loop simpler.
  1740  		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
  1741  		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
  1742  		// it will not overflow.
  1743  		src := dst
  1744  		const maxBits = sys.PtrSize*8 - 7
  1745  		if n <= maxBits {
  1746  			// Start with bits in output buffer.
  1747  			pattern := bits
  1748  			npattern := nbits
  1749  
  1750  			// If we need more bits, fetch them from memory.
  1751  			if size == 1 {
  1752  				src = subtract1(src)
  1753  				for npattern < n {
  1754  					pattern <<= 8
  1755  					pattern |= uintptr(*src)
  1756  					src = subtract1(src)
  1757  					npattern += 8
  1758  				}
  1759  			} else {
  1760  				src = subtract1(src)
  1761  				for npattern < n {
  1762  					pattern <<= 4
  1763  					pattern |= uintptr(*src) & 0xf
  1764  					src = subtract1(src)
  1765  					npattern += 4
  1766  				}
  1767  			}
  1768  
  1769  			// We started with the whole bit output buffer,
  1770  			// and then we loaded bits from whole bytes.
  1771  			// Either way, we might now have too many instead of too few.
  1772  			// Discard the extra.
  1773  			if npattern > n {
  1774  				pattern >>= npattern - n
  1775  				npattern = n
  1776  			}
  1777  
  1778  			// Replicate pattern to at most maxBits.
  1779  			if npattern == 1 {
  1780  				// One bit being repeated.
  1781  				// If the bit is 1, make the pattern all 1s.
  1782  				// If the bit is 0, the pattern is already all 0s,
  1783  				// but we can claim that the number of bits
  1784  				// in the word is equal to the number we need (c),
  1785  				// because right shift of bits will zero fill.
  1786  				if pattern == 1 {
  1787  					pattern = 1<<maxBits - 1
  1788  					npattern = maxBits
  1789  				} else {
  1790  					npattern = c
  1791  				}
  1792  			} else {
  1793  				b := pattern
  1794  				nb := npattern
  1795  				if nb+nb <= maxBits {
  1796  					// Double pattern until the whole uintptr is filled.
  1797  					for nb <= sys.PtrSize*8 {
  1798  						b |= b << nb
  1799  						nb += nb
  1800  					}
  1801  					// Trim away incomplete copy of original pattern in high bits.
  1802  					// TODO(rsc): Replace with table lookup or loop on systems without divide?
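        					// For example, with npattern == 3 on a 64-bit system,
        					// maxBits is 57 and nb becomes 57/3*3 = 57, i.e. 19
        					// whole copies of the 3-bit pattern.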
  1803  					nb = maxBits / npattern * npattern
  1804  					b &= 1<<nb - 1
  1805  					pattern = b
  1806  					npattern = nb
  1807  				}
  1808  			}
  1809  
  1810  			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
  1811  			// Since pattern contains >8 bits, there will be full bytes to flush
  1812  			// on each iteration.
  1813  			for ; c >= npattern; c -= npattern {
  1814  				bits |= pattern << nbits
  1815  				nbits += npattern
  1816  				if size == 1 {
  1817  					for nbits >= 8 {
  1818  						*dst = uint8(bits)
  1819  						dst = add1(dst)
  1820  						bits >>= 8
  1821  						nbits -= 8
  1822  					}
  1823  				} else {
  1824  					for nbits >= 4 {
  1825  						*dst = uint8(bits&0xf | bitScanAll)
  1826  						dst = add1(dst)
  1827  						bits >>= 4
  1828  						nbits -= 4
  1829  					}
  1830  				}
  1831  			}
  1832  
  1833  			// Add final fragment to bit buffer.
  1834  			if c > 0 {
  1835  				pattern &= 1<<c - 1
  1836  				bits |= pattern << nbits
  1837  				nbits += c
  1838  			}
  1839  			continue Run
  1840  		}
  1841  
  1842  		// Repeat; n too large to fit in a register.
  1843  		// Since nbits <= 7, we know the first few bytes of repeated data
  1844  		// are already written to memory.
  1845  		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
  1846  		if size == 1 {
  1847  			// Leading src fragment.
  1848  			src = subtractb(src, (off+7)/8)
  1849  			if frag := off & 7; frag != 0 {
  1850  				bits |= uintptr(*src) >> (8 - frag) << nbits
  1851  				src = add1(src)
  1852  				nbits += frag
  1853  				c -= frag
  1854  			}
  1855  			// Main loop: load one byte, write another.
  1856  			// The bits are rotating through the bit buffer.
  1857  			for i := c / 8; i > 0; i-- {
  1858  				bits |= uintptr(*src) << nbits
  1859  				src = add1(src)
  1860  				*dst = uint8(bits)
  1861  				dst = add1(dst)
  1862  				bits >>= 8
  1863  			}
  1864  			// Final src fragment.
  1865  			if c %= 8; c > 0 {
  1866  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1867  				nbits += c
  1868  			}
  1869  		} else {
  1870  			// Leading src fragment.
  1871  			src = subtractb(src, (off+3)/4)
  1872  			if frag := off & 3; frag != 0 {
  1873  				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
  1874  				src = add1(src)
  1875  				nbits += frag
  1876  				c -= frag
  1877  			}
  1878  			// Main loop: load one byte, write another.
  1879  			// The bits are rotating through the bit buffer.
  1880  			for i := c / 4; i > 0; i-- {
  1881  				bits |= (uintptr(*src) & 0xf) << nbits
  1882  				src = add1(src)
  1883  				*dst = uint8(bits&0xf | bitScanAll)
  1884  				dst = add1(dst)
  1885  				bits >>= 4
  1886  			}
  1887  			// Final src fragment.
  1888  			if c %= 4; c > 0 {
  1889  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1890  				nbits += c
  1891  			}
  1892  		}
  1893  	}
  1894  
  1895  	// Write any final bits out, using full-byte writes, even for the final byte.
  1896  	var totalBits uintptr
  1897  	if size == 1 {
  1898  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
  1899  		nbits += -nbits & 7
  1900  		for ; nbits > 0; nbits -= 8 {
  1901  			*dst = uint8(bits)
  1902  			dst = add1(dst)
  1903  			bits >>= 8
  1904  		}
  1905  	} else {
  1906  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
  1907  		nbits += -nbits & 3
  1908  		for ; nbits > 0; nbits -= 4 {
  1909  			v := bits&0xf | bitScanAll
  1910  			*dst = uint8(v)
  1911  			dst = add1(dst)
  1912  			bits >>= 4
  1913  		}
  1914  	}
  1915  	return totalBits
  1916  }
  1917  
  1918  // materializeGCProg allocates space for the (1-bit) pointer bitmask
  1919  // for an object of size ptrdata.  Then it fills that space with the
  1920  // pointer bitmask specified by the program prog.
  1921  // The bitmask starts at s.startAddr.
  1922  // The result must be deallocated with dematerializeGCProg.
  1923  func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
  1924  	// Each word of ptrdata needs one bit in the bitmap.
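        	// For example (assuming 8-byte words and the usual 8 KiB runtime page
        	// size): ptrdata = 1<<20 bytes is 1<<17 words, needing 1<<14 bitmap
        	// bytes and therefore two pages.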
  1925  	bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
  1926  	// Compute the number of pages needed for bitmapBytes.
  1927  	pages := divRoundUp(bitmapBytes, pageSize)
  1928  	s := mheap_.allocManual(pages, &memstats.gc_sys)
  1929  	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
  1930  	return s
  1931  }
  1932  func dematerializeGCProg(s *mspan) {
  1933  	mheap_.freeManual(s, &memstats.gc_sys)
  1934  }
  1935  
  1936  func dumpGCProg(p *byte) {
  1937  	nptr := 0
  1938  	for {
  1939  		x := *p
  1940  		p = add1(p)
  1941  		if x == 0 {
  1942  			print("\t", nptr, " end\n")
  1943  			break
  1944  		}
  1945  		if x&0x80 == 0 {
  1946  			print("\t", nptr, " lit ", x, ":")
  1947  			n := int(x+7) / 8
  1948  			for i := 0; i < n; i++ {
  1949  				print(" ", hex(*p))
  1950  				p = add1(p)
  1951  			}
  1952  			print("\n")
  1953  			nptr += int(x)
  1954  		} else {
  1955  			nbit := int(x &^ 0x80)
  1956  			if nbit == 0 {
  1957  				for nb := uint(0); ; nb += 7 {
  1958  					x := *p
  1959  					p = add1(p)
  1960  					nbit |= int(x&0x7f) << nb
  1961  					if x&0x80 == 0 {
  1962  						break
  1963  					}
  1964  				}
  1965  			}
  1966  			count := 0
  1967  			for nb := uint(0); ; nb += 7 {
  1968  				x := *p
  1969  				p = add1(p)
  1970  				count |= int(x&0x7f) << nb
  1971  				if x&0x80 == 0 {
  1972  					break
  1973  				}
  1974  			}
  1975  			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
  1976  			nptr += nbit * count
  1977  		}
  1978  	}
  1979  }
  1980  
  1981  // Testing.
  1982  
  1983  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  1984  	target := (*stkframe)(ctxt)
  1985  	if frame.sp <= target.sp && target.sp < frame.varp {
  1986  		*target = *frame
  1987  		return false
  1988  	}
  1989  	return true
  1990  }
  1991  
  1992  // gcbits returns the GC type info for x, for testing.
  1993  // The result is the bitmap entries (0 or 1), one entry per byte.
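        // For example, for x of type *struct{ i uintptr; p *int } pointing at a
        // heap-allocated value, the result would be []byte{0, 1}: the first word
        // is a scalar, the second holds a pointer.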
  1994  //go:linkname reflect_gcbits reflect.gcbits
  1995  func reflect_gcbits(x interface{}) []byte {
  1996  	ret := getgcmask(x)
  1997  	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
  1998  	nptr := typ.ptrdata / sys.PtrSize
  1999  	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
  2000  		ret = ret[:len(ret)-1]
  2001  	}
  2002  	return ret
  2003  }
  2004  
  2005  // Returns GC type info for the pointer stored in ep for testing.
  2006  // If ep points to the stack, only static live information will be returned
  2007  // (i.e. not for objects which are only dynamically live stack objects).
  2008  func getgcmask(ep interface{}) (mask []byte) {
  2009  	e := *efaceOf(&ep)
  2010  	p := e.data
  2011  	t := e._type
  2012  	// data or bss
  2013  	for _, datap := range activeModules() {
  2014  		// data
  2015  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  2016  			bitmap := datap.gcdatamask.bytedata
  2017  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2018  			mask = make([]byte, n/sys.PtrSize)
  2019  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2020  				off := (uintptr(p) + i - datap.data) / sys.PtrSize
  2021  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  2022  			}
  2023  			return
  2024  		}
  2025  
  2026  		// bss
  2027  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  2028  			bitmap := datap.gcbssmask.bytedata
  2029  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2030  			mask = make([]byte, n/sys.PtrSize)
  2031  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2032  				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
  2033  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  2034  			}
  2035  			return
  2036  		}
  2037  	}
  2038  
  2039  	// heap
  2040  	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
  2041  		hbits := heapBitsForAddr(base)
  2042  		n := s.elemsize
  2043  		mask = make([]byte, n/sys.PtrSize)
  2044  		for i := uintptr(0); i < n; i += sys.PtrSize {
  2045  			if hbits.isPointer() {
  2046  				mask[i/sys.PtrSize] = 1
  2047  			}
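        			// The second word is skipped here because its high bit holds
        			// the checkmark, not the scan bit, so it must not be allowed
        			// to end the object.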
  2048  			if i != 1*sys.PtrSize && !hbits.morePointers() {
  2049  				mask = mask[:i/sys.PtrSize]
  2050  				break
  2051  			}
  2052  			hbits = hbits.next()
  2053  		}
  2054  		return
  2055  	}
  2056  
  2057  	// stack
  2058  	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
  2059  		var frame stkframe
  2060  		frame.sp = uintptr(p)
  2061  		_g_ := getg()
  2062  		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  2063  		if frame.fn.valid() {
  2064  			locals, _, _ := getStackMap(&frame, nil, false)
  2065  			if locals.n == 0 {
  2066  				return
  2067  			}
  2068  			size := uintptr(locals.n) * sys.PtrSize
  2069  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2070  			mask = make([]byte, n/sys.PtrSize)
  2071  			for i := uintptr(0); i < n; i += sys.PtrSize {
  2072  				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
  2073  				mask[i/sys.PtrSize] = locals.ptrbit(off)
  2074  			}
  2075  		}
  2076  		return
  2077  	}
  2078  
  2079  	// otherwise, not something the GC knows about.
  2080  	// possibly read-only data, like malloc(0).
  2081  	// must not have pointers
  2082  	return
  2083  }
  2084  
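        // The packed 1-bit masks consulted above (gcdatamask, gcbssmask, and the
        // output of progToPointerMask) hold one bit per pointer-sized word, filled
        // from the low bit of each byte upward. The helper below is a hypothetical
        // sketch, not used by the runtime, showing how such a mask is assembled
        // from a per-word pointer/scalar description.
        func ptrMaskSketch(isPtr []bool) []byte {
        	mask := make([]byte, (len(isPtr)+7)/8)
        	for i, isp := range isPtr {
        		if isp {
        			// Word i is a pointer: set bit i%8 of byte i/8.
        			mask[i/8] |= 1 << uint(i%8)
        		}
        	}
        	return mask
        }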
