
Source file src/runtime/mcentral.go

  // Copyright 2009 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
  // license that can be found in the LICENSE file.
  
  // Central free lists.
  //
  // See malloc.go for an overview.
  //
  // The mcentral doesn't actually contain the list of free objects; the mspans do.
  // Each mcentral is two lists of mspans: those with free objects (c.nonempty)
  // and those that are completely allocated (c.empty).
  
  package runtime
  
  import "runtime/internal/atomic"
  
  // Central list of free objects of a given size.
  //
  //go:notinheap
  type mcentral struct {
  	lock      mutex
  	sizeclass int32
  	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
  	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
  }
  
  // Initialize a single central free list.
  func (c *mcentral) init(sizeclass int32) {
  	c.sizeclass = sizeclass
  	c.nonempty.init()
  	c.empty.init()
  }
  
  // Allocate a span to use in an mcache.
  func (c *mcentral) cacheSpan() *mspan {
  	// Deduct credit for this span allocation and sweep if necessary.
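  	// Bytes of the span found to be already in use are credited
  	// back via reimburseSweepCredit below.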
  	spanBytes := uintptr(class_to_allocnpages[c.sizeclass]) * _PageSize
  	deductSweepCredit(spanBytes, 0)
  
  	lock(&c.lock)
  	sg := mheap_.sweepgen
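  	// Per the sweepgen comment in mheap.go:
  	// sweepgen == sg-2: the span needs sweeping
  	// sweepgen == sg-1: the span is currently being swept
  	// sweepgen == sg:   the span has been swept and is ready to use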
  retry:
  	var s *mspan
  	for s = c.nonempty.first; s != nil; s = s.next {
  		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
  			c.nonempty.remove(s)
  			c.empty.insertBack(s)
  			unlock(&c.lock)
  			s.sweep(true)
  			goto havespan
  		}
  		if s.sweepgen == sg-1 {
  			// the span is being swept by the background sweeper; skip it
  			continue
  		}
  		// we have a nonempty span that does not require sweeping; allocate from it
  		c.nonempty.remove(s)
  		c.empty.insertBack(s)
  		unlock(&c.lock)
  		goto havespan
  	}
  
  	for s = c.empty.first; s != nil; s = s.next {
  		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
  			// we have an empty span that requires sweeping;
  			// sweep it and see if we can free some space in it
  			c.empty.remove(s)
  			// swept spans are at the end of the list
  			c.empty.insertBack(s)
  			unlock(&c.lock)
  			s.sweep(true)
  			freeIndex := s.nextFreeIndex()
  			if freeIndex != s.nelems {
  				s.freeindex = freeIndex
  				goto havespan
  			}
  			lock(&c.lock)
  			// the span is still empty after the sweep;
  			// it is already in the empty list, so just retry
  			goto retry
  		}
  		if s.sweepgen == sg-1 {
  			// the span is being swept by the background sweeper; skip it
  			continue
  		}
  		// already-swept empty span;
  		// all subsequent spans must also be either swept or in the process of being swept
  		break
  	}
  	unlock(&c.lock)
  
  	// Replenish central list if empty.
  	s = c.grow()
  	if s == nil {
  		return nil
  	}
  	lock(&c.lock)
  	c.empty.insertBack(s)
  	unlock(&c.lock)
  
  	// At this point s is a non-empty span, queued at the end of the empty list,
  	// and c is unlocked.
  havespan:
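  	// cap is the total number of object slots in the span;
  	// n is the number of slots still free.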
  	cap := int32((s.npages << _PageShift) / s.elemsize)
  	n := cap - int32(s.allocCount)
  	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
  		throw("span has no free objects")
  	}
  	usedBytes := uintptr(s.allocCount) * s.elemsize
  	if usedBytes > 0 {
  		reimburseSweepCredit(usedBytes)
  	}
  	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
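  	// heap_live conservatively counts every unallocated slot in the
  	// span as live; uncacheSpan undoes this for slots still free when
  	// the span is returned.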
  	if trace.enabled {
  		// heap_live changed.
  		traceHeapAlloc()
  	}
  	if gcBlackenEnabled != 0 {
  		// heap_live changed.
  		gcController.revise()
  	}
  	s.incache = true
  	freeByteBase := s.freeindex &^ (64 - 1)
  	whichByte := freeByteBase / 8
  	// Initialize the alloc-bits cache.
  	s.refillAllocCache(whichByte)
  
  	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
  	// s.allocCache.
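  	// For example (hypothetical values): if s.freeindex is 70, then
  	// freeByteBase above is 64 (70 &^ 63), whichByte is 8 (64/8), and
  	// the shift below is 70%64 == 6, so bit 0 of s.allocCache will
  	// describe object 70.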
  	s.allocCache >>= s.freeindex % 64
  
  	return s
  }
  
  // Return a span from an mcache.
  func (c *mcentral) uncacheSpan(s *mspan) {
  	lock(&c.lock)
  
  	s.incache = false
  
  	if s.allocCount == 0 {
  		throw("uncaching span but s.allocCount == 0")
  	}
  
  	cap := int32((s.npages << _PageShift) / s.elemsize)
  	n := cap - int32(s.allocCount)
  	if n > 0 {
  		c.empty.remove(s)
  		c.nonempty.insert(s)
  		// cacheSpan conservatively counted
  		// unallocated slots in heap_live. Undo this.
  		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
  	}
  	unlock(&c.lock)
  }
  
  // freeSpan updates c and s after sweeping s.
  // It sets s's sweepgen to the latest generation,
  // and, based on the number of free objects in s,
  // moves s to the appropriate list of c or returns it
  // to the heap.
  // freeSpan returns true if s was returned to the heap.
  // If preserve=true, it does not move s (the caller
  // must take care of it).
  func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
  	if s.incache {
  		throw("freeSpan given cached span")
  	}
  	s.needzero = 1
  
  	if preserve {
  		// preserve is set only when this is called from cacheSpan above;
  		// in that case the span must be in the empty list.
  		if !s.inList() {
  			throw("can't preserve unlinked span")
  		}
  		atomic.Store(&s.sweepgen, mheap_.sweepgen)
  		return false
  	}
  
  	lock(&c.lock)
  
  	// Move to nonempty if necessary.
  	if wasempty {
  		c.empty.remove(s)
  		c.nonempty.insert(s)
  	}
  
  	// Delay updating sweepgen until here. This is the signal that
  	// the span may be used in an mcache, so it must come after the
  	// linked-list operations above (actually, just after the
  	// lock of c above).
  	atomic.Store(&s.sweepgen, mheap_.sweepgen)
  
  	if s.allocCount != 0 {
  		unlock(&c.lock)
  		return false
  	}
  
  	c.nonempty.remove(s)
  	unlock(&c.lock)
  	mheap_.freeSpan(s, 0)
  	return true
  }
  
  // grow allocates a new empty span from the heap and initializes it for c's size class.
  func (c *mcentral) grow() *mspan {
  	npages := uintptr(class_to_allocnpages[c.sizeclass])
  	size := uintptr(class_to_size[c.sizeclass])
  	n := (npages << _PageShift) / size
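  	// For example, an 8-byte size class on a single 8KB page gives
  	// n == 1024 objects per span.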
  
  	s := mheap_.alloc(npages, c.sizeclass, false, true)
  	if s == nil {
  		return nil
  	}
  
  	p := s.base()
  	s.limit = p + size*n
  
  	heapBitsForSpan(s.base()).initSpan(s)
  	return s
  }
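
The following toy program is a simplified, self-contained model of the two-list discipline described at the top of this file: spans with free objects live on a nonempty list, and spans handed out to a cache (or fully allocated) live on an empty list. The names toyCentral and toySpan are hypothetical illustrations, not runtime APIs; the real mcentral also handles sweeping, locking, and heap accounting, all of which this sketch omits.

  package main

  import "fmt"

  // toySpan models an mspan: a fixed number of object slots, some of
  // which are allocated.
  type toySpan struct {
  	id         int
  	nelems     int
  	allocCount int
  	next       *toySpan
  }

  // toyCentral models an mcentral: spans with free objects live on
  // nonempty; spans handed to a cache live on empty.
  type toyCentral struct {
  	nonempty *toySpan
  	empty    *toySpan
  }

  // cacheSpan hands out a span with at least one free slot, moving it
  // to the empty list, as cacheSpan above does for spans given to an
  // mcache.
  func (c *toyCentral) cacheSpan() *toySpan {
  	s := c.nonempty
  	if s == nil {
  		return nil // the real code would grow() a new span from the heap
  	}
  	c.nonempty = s.next
  	s.next = c.empty
  	c.empty = s
  	return s
  }

  // uncacheSpan takes back a span from a cache; if it still has free
  // slots, it moves back to the nonempty list.
  func (c *toyCentral) uncacheSpan(s *toySpan) {
  	if s.allocCount == s.nelems {
  		return // fully allocated: stays on the empty list
  	}
  	// Unlink from empty (a linear scan; the runtime uses a doubly
  	// linked mSpanList instead).
  	for p := &c.empty; *p != nil; p = &(*p).next {
  		if *p == s {
  			*p = s.next
  			break
  		}
  	}
  	s.next = c.nonempty
  	c.nonempty = s
  }

  func main() {
  	c := &toyCentral{nonempty: &toySpan{id: 1, nelems: 4}}
  	s := c.cacheSpan() // span moves to the empty list
  	s.allocCount = 2   // allocate two objects while cached
  	c.uncacheSpan(s)   // two slots still free: back to nonempty
  	fmt.Println("span", c.nonempty.id, "has",
  		c.nonempty.nelems-c.nonempty.allocCount, "free slots")
  }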
  
