Source file src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass

	// For !go115NewMCentralImpl.
	nonempty mSpanList // list of spans with a free object, i.e. a nonempty free list
	empty    mSpanList // list of spans with no free objects (or cached in an mcache)

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	if go115NewMCentralImpl {
		lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
		lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
		lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
		lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
	} else {
		c.nonempty.init()
		c.empty.init()
		lockInit(&c.lock, lockRankMcentral)
	}
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
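
// Both arrays above are indexed purely by the parity of sweepgen/2, so
// bumping sweepgen by 2 each GC cycle (as the garbage collector does)
// swaps which spanSet plays the swept role and which plays the unswept
// role. A minimal standalone sketch of that arithmetic; sweptUnsweptIdx
// is an illustrative name, not a runtime function.
func sweptUnsweptIdx(sweepgen uint32) (swept, unswept uint32) {
	// With sweepgen == 4: swept == 0, unswept == 1.
	// After the next cycle, sweepgen == 6: swept == 1, unswept == 0.
	return sweepgen / 2 % 2, 1 - sweepgen/2%2
}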

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	if !go115NewMCentralImpl {
		return c.oldCacheSpan()
	}
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	sg := mheap_.sweepgen

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan

	// Try partial swept spans first.
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	// Now try partial unswept spans.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.partialUnswept(sg).pop()
		if s == nil {
			break
		}
		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// We got ownership of the span, so let's sweep it and use it.
			s.sweep(true)
			goto havespan
		}
		// We failed to get ownership of the span, which means it's being or
		// has been swept by an asynchronous sweeper that just couldn't remove it
		// from the unswept list. That sweeper took ownership of the span and
		// responsibility for either freeing it to the heap or putting it on the
		// right swept list. Either way, we should just ignore it (and it's unsafe
		// for us to do anything else).
	}
	// Now try full unswept spans, sweeping them and putting them into the
	// right list if we fail to get a span.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.fullUnswept(sg).pop()
		if s == nil {
			break
		}
		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// We got ownership of the span, so let's sweep it.
			s.sweep(true)
			// Check if there's any free space.
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			// Add it to the swept list, because sweeping didn't give us any free space.
			c.fullSwept(sg).push(s)
		}
		// See comment for partial unswept spans.
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
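
// cacheSpan returns s with s.allocCache shifted so that its low bit
// corresponds to s.freeindex. In the allocCache convention a set bit
// means the slot is free, so the allocator can find the next free slot
// with a count-trailing-zeros on the cache. A minimal standalone sketch
// of that bit math, under the same convention; nextFreeFromCache is an
// illustrative helper, not part of the runtime.
func nextFreeFromCache(cache uint64, freeindex uintptr) (uintptr, bool) {
	if cache == 0 {
		// No free slots in the cached 64-slot window.
		return 0, false
	}
	// Count trailing zero bits: the distance from freeindex to the
	// first free slot.
	tz := uintptr(0)
	for cache&1 == 0 {
		cache >>= 1
		tz++
	}
	return freeindex + tz, true
}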

// Allocate a span to use in an mcache.
//
// For !go115NewMCentralImpl.
func (c *mcentral) oldCacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if !go115NewMCentralImpl {
		c.oldUncacheSpan(s)
		return
	}
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}
	n := int(s.nelems) - int(s.allocCount)

	// Fix up statistics.
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		if !stale {
			// (*mcentral).cacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		s.sweep(false)
	} else {
		if n > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
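
// The stale check above follows the sweepgen convention documented with
// mspan in mheap.go: a span's sweepgen, compared against the current
// mheap_.sweepgen, encodes its sweep state. A minimal sketch of that
// mapping as this file uses it; sweepState and its result strings are
// illustrative, not runtime identifiers.
func sweepState(spanGen, heapGen uint32) string {
	switch spanGen {
	case heapGen - 2:
		return "in-use, needs sweeping"
	case heapGen - 1:
		return "in-use, currently being swept"
	case heapGen:
		return "in-use, swept and ready to allocate from"
	case heapGen + 1:
		return "cached in an mcache before sweeping began; still needs sweeping"
	case heapGen + 3:
		return "swept, then cached in an mcache"
	}
	return "unexpected sweepgen"
}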

// Return span from an mcache.
//
// For !go115NewMCentralImpl.
func (c *mcentral) oldUncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	n := int(s.nelems) - int(s.allocCount)
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		lock(&c.lock)
		c.empty.remove(s)
		c.nonempty.insert(s)
		if !stale {
			// mCentral_CacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
		unlock(&c.lock)
	}

	if stale {
		// Now that s is in the right mcentral list, we can
		// sweep it.
		s.sweep(false)
	}
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan reports whether s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
//
// For !go115NewMCentralImpl.
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from (un)cacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an mcache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := (npages << _PageShift) >> s.divShift * uintptr(s.divMul) >> s.divShift2
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}

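// grow leans on precomputed per-size-class constants (divMul, divShift,
// divShift2) so that dividing the span's byte size by the element size
// needs only shifts and a multiply. A minimal standalone sketch of the
// underlying reciprocal-multiplication idea, not the runtime's exact
// divMagic derivation; magicDiv and checkMagicDiv are illustrative
// names, and the chosen shift must be validated for the input range.
func magicDiv(d uint64, shift uint) uint64 {
	// mul = ceil(2^shift / d); for a large enough shift, x*mul>>shift
	// equals x/d over the range of interest (verified by checkMagicDiv).
	return (1<<shift + d - 1) / d
}

func checkMagicDiv(d uint64, shift uint, max uint64) bool {
	mul := magicDiv(d, shift)
	for x := uint64(0); x <= max; x++ {
		if x*mul>>shift != x/d {
			return false
		}
	}
	return true
}

// For example, checkMagicDiv(48, 32, 1<<16) reports that a 32-bit shift
// is enough to divide any byte count below 64KiB by a 48-byte element
// size, which is the kind of guarantee the size-class tables bake in.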
