Black Lives Matter. Support the Equal Justice Initiative.

Source file src/runtime/mranges.go

Documentation: runtime

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Address range data structure.
     6  //
     7  // This file contains an implementation of a data structure which
     8  // manages ordered address ranges.
     9  
    10  package runtime
    11  
    12  import (
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  // addrRange represents a region of address space.
    18  //
    19  // An addrRange must never span a gap in the address space.
    20  type addrRange struct {
    21  	// base and limit together represent the region of address space
    22  	// [base, limit). That is, base is inclusive, limit is exclusive.
    23  	// These are address over an offset view of the address space on
    24  	// platforms with a segmented address space, that is, on platforms
    25  	// where arenaBaseOffset != 0.
    26  	base, limit offAddr
    27  }
    28  
// makeAddrRange creates a new address range from two virtual addresses.
//
// Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base, limit uintptr) addrRange {
	r := addrRange{offAddr{base}, offAddr{limit}}
	// base-arenaBaseOffset wraps around (and thus compares >= base) exactly
	// when the address lies below arenaBaseOffset. The two sides of the
	// comparison therefore disagree only if base and limit straddle the
	// segment boundary. When arenaBaseOffset == 0 both sides are always
	// true and this never throws.
	if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
		throw("addr range base and limit are not in the same memory segment")
	}
	return r
}
    39  
    40  // size returns the size of the range represented in bytes.
    41  func (a addrRange) size() uintptr {
    42  	if !a.base.lessThan(a.limit) {
    43  		return 0
    44  	}
    45  	// Subtraction is safe because limit and base must be in the same
    46  	// segment of the address space.
    47  	return a.limit.diff(a.base)
    48  }
    49  
    50  // contains returns whether or not the range contains a given address.
    51  func (a addrRange) contains(addr uintptr) bool {
    52  	return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
    53  }
    54  
// subtract takes the addrRange toPrune and cuts out any overlap with
// from, then returns the new range. subtract assumes that a and b
// either don't overlap at all, only overlap on one side, or are equal.
// If b is strictly contained in a, thus forcing a split, it will throw.
func (a addrRange) subtract(b addrRange) addrRange {
	if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
		// b fully covers a; nothing is left.
		return addrRange{}
	} else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
		// b is strictly inside a, which would split a in two — the
		// caller violated the precondition.
		throw("bad prune")
	} else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
		// b overlaps a's low end; trim the bottom of a.
		a.base = b.limit
	} else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
		// b overlaps a's high end; trim the top of a.
		a.limit = b.base
	}
	// No overlap at all: a is returned unchanged.
	return a
}
    71  
    72  // removeGreaterEqual removes all addresses in a greater than or equal
    73  // to addr and returns the new range.
    74  func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
    75  	if (offAddr{addr}).lessEqual(a.base) {
    76  		return addrRange{}
    77  	}
    78  	if a.limit.lessEqual(offAddr{addr}) {
    79  		return a
    80  	}
    81  	return makeAddrRange(a.base.addr(), addr)
    82  }
    83  
var (
	// minOffAddr is the minimum address in the offset space, and
	// it corresponds to the virtual address arenaBaseOffset.
	minOffAddr = offAddr{arenaBaseOffset}

	// maxOffAddr is the maximum address in the offset address
	// space. It corresponds to the highest virtual address representable
	// by the page alloc chunk and heap arena maps. The uintptrMask
	// truncates the value to the platform's pointer width.
	maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
)
    94  
// offAddr represents an address in a contiguous view
// of the address space on systems where the address space is
// segmented. On other systems, it's just a normal address.
//
// Comparisons on offAddr values must go through lessThan/lessEqual/equal,
// which compare in the offset space.
type offAddr struct {
	// a is just the virtual address, but should never be used
	// directly. Call addr() to get this value instead.
	a uintptr
}
   103  
   104  // add adds a uintptr offset to the offAddr.
   105  func (l offAddr) add(bytes uintptr) offAddr {
   106  	return offAddr{a: l.a + bytes}
   107  }
   108  
   109  // sub subtracts a uintptr offset from the offAddr.
   110  func (l offAddr) sub(bytes uintptr) offAddr {
   111  	return offAddr{a: l.a - bytes}
   112  }
   113  
   114  // diff returns the amount of bytes in between the
   115  // two offAddrs.
   116  func (l1 offAddr) diff(l2 offAddr) uintptr {
   117  	return l1.a - l2.a
   118  }
   119  
   120  // lessThan returns true if l1 is less than l2 in the offset
   121  // address space.
   122  func (l1 offAddr) lessThan(l2 offAddr) bool {
   123  	return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
   124  }
   125  
   126  // lessEqual returns true if l1 is less than or equal to l2 in
   127  // the offset address space.
   128  func (l1 offAddr) lessEqual(l2 offAddr) bool {
   129  	return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
   130  }
   131  
   132  // equal returns true if the two offAddr values are equal.
   133  func (l1 offAddr) equal(l2 offAddr) bool {
   134  	// No need to compare in the offset space, it
   135  	// means the same thing.
   136  	return l1 == l2
   137  }
   138  
// addr returns the virtual address for this offset address.
// This is the only sanctioned way to read the underlying field.
func (l offAddr) addr() uintptr {
	return l.a
}
   143  
// addrRanges is a data structure holding a collection of ranges of
// address space.
//
// The ranges are coalesced eagerly to reduce the
// number ranges it holds.
//
// The slice backing store for this field is persistentalloc'd
// and thus there is no way to free it.
//
// addrRanges is not thread-safe.
type addrRanges struct {
	// ranges is a slice of ranges sorted by base.
	// Invariant: the ranges are non-overlapping and non-adjacent
	// (adjacent ranges are coalesced by add).
	ranges []addrRange

	// totalBytes is the total amount of address space in bytes counted by
	// this addrRanges.
	totalBytes uintptr

	// sysStat is the stat to track allocations by this type
	sysStat *uint64
}
   165  
// init initializes a, allocating an initial backing store of 16 ranges
// with persistentalloc (never freed) and charging it to sysStat.
func (a *addrRanges) init(sysStat *uint64) {
	// Build the slice header by hand via notInHeapSlice because the
	// backing array is persistentalloc'd, i.e. not in the Go heap.
	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
	ranges.len = 0
	ranges.cap = 16
	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
	a.sysStat = sysStat
	a.totalBytes = 0
}
   174  
   175  // findSucc returns the first index in a such that base is
   176  // less than the base of the addrRange at that index.
   177  func (a *addrRanges) findSucc(addr uintptr) int {
   178  	// TODO(mknyszek): Consider a binary search for large arrays.
   179  	// While iterating over these ranges is potentially expensive,
   180  	// the expected number of ranges is small, ideally just 1,
   181  	// since Go heaps are usually mostly contiguous.
   182  	base := offAddr{addr}
   183  	for i := range a.ranges {
   184  		if base.lessThan(a.ranges[i].base) {
   185  			return i
   186  		}
   187  	}
   188  	return len(a.ranges)
   189  }
   190  
   191  // findAddrGreaterEqual returns the smallest address represented by a
   192  // that is >= addr. Thus, if the address is represented by a,
   193  // then it returns addr. The second return value indicates whether
   194  // such an address exists for addr in a. That is, if addr is larger than
   195  // any address known to a, the second return value will be false.
   196  func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
   197  	i := a.findSucc(addr)
   198  	if i == 0 {
   199  		return a.ranges[0].base.addr(), true
   200  	}
   201  	if a.ranges[i-1].contains(addr) {
   202  		return addr, true
   203  	}
   204  	if i < len(a.ranges) {
   205  		return a.ranges[i].base.addr(), true
   206  	}
   207  	return 0, false
   208  }
   209  
   210  // contains returns true if a covers the address addr.
   211  func (a *addrRanges) contains(addr uintptr) bool {
   212  	i := a.findSucc(addr)
   213  	if i == 0 {
   214  		return false
   215  	}
   216  	return a.ranges[i-1].contains(addr)
   217  }
   218  
// add inserts a new address range to a.
//
// r must not overlap with any address range in a.
func (a *addrRanges) add(r addrRange) {
	// The copies in this function are potentially expensive, but this data
	// structure is meant to represent the Go heap. At worst, copying this
	// would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
	// copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
	// arenas which is completely discontiguous. ~160µs is still a lot, but in
	// practice most platforms have 64 MiB arenas (which cuts this by a factor
	// of 16) and Go heaps are usually mostly contiguous, so the chance that
	// an addrRanges even grows to that size is extremely low.

	// Because we assume r is not currently represented in a,
	// findSucc gives us our insertion index.
	i := a.findSucc(r.base.addr())
	// r coalesces downward if the preceding range ends exactly at r's base,
	// and upward if r ends exactly at the next range's base. Eager
	// coalescing here is what keeps a.ranges minimal.
	coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
	coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
	if coalescesUp && coalescesDown {
		// We have neighbors and they both border us.
		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit

		// Delete a.ranges[i].
		copy(a.ranges[i:], a.ranges[i+1:])
		a.ranges = a.ranges[:len(a.ranges)-1]
	} else if coalescesDown {
		// We have a neighbor at a lower address only and it borders us.
		// Merge the new space into a.ranges[i-1].
		a.ranges[i-1].limit = r.limit
	} else if coalescesUp {
		// We have a neighbor at a higher address only and it borders us.
		// Merge the new space into a.ranges[i].
		a.ranges[i].base = r.base
	} else {
		// We may or may not have neighbors which don't border us.
		// Add the new range.
		if len(a.ranges)+1 > cap(a.ranges) {
			// Grow the array. Note that this leaks the old array, but since
			// we're doubling we have at most 2x waste. For a 1 TiB heap and
			// 4 MiB arenas which are all discontiguous (both very conservative
			// assumptions), this would waste at most 4 MiB of memory.
			oldRanges := a.ranges
			// Rewrite the slice header directly: the new backing array is
			// persistentalloc'd, outside the Go heap.
			ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
			ranges.len = len(oldRanges) + 1
			ranges.cap = cap(oldRanges) * 2
			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))

			// Copy in the old array, but make space for the new range.
			copy(a.ranges[:i], oldRanges[:i])
			copy(a.ranges[i+1:], oldRanges[i:])
		} else {
			// Extend in place and shift the tail up by one to open slot i.
			a.ranges = a.ranges[:len(a.ranges)+1]
			copy(a.ranges[i+1:], a.ranges[i:])
		}
		a.ranges[i] = r
	}
	a.totalBytes += r.size()
}
   278  
   279  // removeLast removes and returns the highest-addressed contiguous range
   280  // of a, or the last nBytes of that range, whichever is smaller. If a is
   281  // empty, it returns an empty range.
   282  func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
   283  	if len(a.ranges) == 0 {
   284  		return addrRange{}
   285  	}
   286  	r := a.ranges[len(a.ranges)-1]
   287  	size := r.size()
   288  	if size > nBytes {
   289  		newEnd := r.limit.sub(nBytes)
   290  		a.ranges[len(a.ranges)-1].limit = newEnd
   291  		a.totalBytes -= nBytes
   292  		return addrRange{newEnd, r.limit}
   293  	}
   294  	a.ranges = a.ranges[:len(a.ranges)-1]
   295  	a.totalBytes -= size
   296  	return r
   297  }
   298  
// removeGreaterEqual removes the ranges of a which are above addr, and additionally
// splits any range containing addr.
func (a *addrRanges) removeGreaterEqual(addr uintptr) {
	// pivot is the index of the first range whose base is > addr;
	// everything at or after pivot is entirely above addr.
	pivot := a.findSucc(addr)
	if pivot == 0 {
		// addr is before all ranges in a.
		a.totalBytes = 0
		a.ranges = a.ranges[:0]
		return
	}
	// Tally the bytes in the ranges being dropped wholesale.
	removed := uintptr(0)
	for _, r := range a.ranges[pivot:] {
		removed += r.size()
	}
	if r := a.ranges[pivot-1]; r.contains(addr) {
		// The range just below the pivot straddles addr: trim it.
		// Count the whole range as removed, then credit back whatever
		// survives the trim.
		removed += r.size()
		r = r.removeGreaterEqual(addr)
		if r.size() == 0 {
			// Nothing survived; drop this range too.
			pivot--
		} else {
			removed -= r.size()
			a.ranges[pivot-1] = r
		}
	}
	a.ranges = a.ranges[:pivot]
	a.totalBytes -= removed
}
   326  
// cloneInto makes a deep clone of a's state into b, re-using
// b's ranges if able.
func (a *addrRanges) cloneInto(b *addrRanges) {
	if len(a.ranges) > cap(b.ranges) {
		// Grow the array. As in init, the backing store is
		// persistentalloc'd (never freed) and charged to b's sysStat;
		// the old backing array, if any, is leaked.
		ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
		ranges.len = 0
		ranges.cap = cap(a.ranges)
		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
	}
	b.ranges = b.ranges[:len(a.ranges)]
	b.totalBytes = a.totalBytes
	copy(b.ranges, a.ranges)
}
   341  

View as plain text