...
Run Format

Source file src/sync/pool.go

     1	// Copyright 2013 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package sync
     6	
     7	import (
     8		"internal/race"
     9		"runtime"
    10		"sync/atomic"
    11		"unsafe"
    12	)
    13	
    14	// A Pool is a set of temporary objects that may be individually saved and
    15	// retrieved.
    16	//
    17	// Any item stored in the Pool may be removed automatically at any time without
    18	// notification. If the Pool holds the only reference when this happens, the
    19	// item might be deallocated.
    20	//
    21	// A Pool is safe for use by multiple goroutines simultaneously.
    22	//
    23	// Pool's purpose is to cache allocated but unused items for later reuse,
    24	// relieving pressure on the garbage collector. That is, it makes it easy to
    25	// build efficient, thread-safe free lists. However, it is not suitable for all
    26	// free lists.
    27	//
    28	// An appropriate use of a Pool is to manage a group of temporary items
    29	// silently shared among and potentially reused by concurrent independent
    30	// clients of a package. Pool provides a way to amortize allocation overhead
    31	// across many clients.
    32	//
    33	// An example of good use of a Pool is in the fmt package, which maintains a
    34	// dynamically-sized store of temporary output buffers. The store scales under
    35	// load (when many goroutines are actively printing) and shrinks when
    36	// quiescent.
    37	//
    38	// On the other hand, a free list maintained as part of a short-lived object is
    39	// not a suitable use for a Pool, since the overhead does not amortize well in
    40	// that scenario. It is more efficient to have such objects implement their own
    41	// free list.
    42	//
    43	// A Pool must not be copied after first use.
type Pool struct {
	noCopy noCopy // flagged by `go vet`'s copylocks check; enforces "must not be copied after first use"

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}
    55	
// Local per-P Pool appendix.
// One poolLocal exists per P in the array pointed at by Pool.local.
// Field order and the trailing pad are deliberate: pad keeps poolLocals of
// different Ps on separate cache lines (see the "false sharing" note below).
type poolLocal struct {
	private interface{}   // Can be used only by the respective P.
	shared  []interface{} // Can be used by any P.
	Mutex                 // Protects shared.
	pad     [128]byte     // Prevents false sharing.
}
    63	
// from runtime
// fastrand is provided by the runtime (no body here); it returns a cheap
// pseudo-random uint32, used by Put to randomly drop values under the
// race detector.
func fastrand() uint32
    66	
    67	var poolRaceHash [128]uint64
    68	
    69	// poolRaceAddr returns an address to use as the synchronization point
    70	// for race detector logic. We don't use the actual pointer stored in x
    71	// directly, for fear of conflicting with other synchronization on that address.
    72	// Instead, we hash the pointer to get an index into poolRaceHash.
    73	// See discussion on golang.org/cl/31589.
    74	func poolRaceAddr(x interface{}) unsafe.Pointer {
    75		ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
    76		h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
    77		return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
    78	}
    79	
// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
	if x == nil {
		return
	}
	if race.Enabled {
		if fastrand()%4 == 0 {
			// Randomly drop x on floor.
			return
		}
		race.ReleaseMerge(poolRaceAddr(x))
		race.Disable()
	}
	// Pin the goroutine to its P; preemption is disabled until
	// runtime_procUnpin below, so l is ours exclusively for the fast path.
	l := p.pin()
	// Fast path: stash x in the per-P private slot if it is free.
	if l.private == nil {
		l.private = x
		x = nil
	}
	runtime_procUnpin()
	// Slow path: private slot was occupied; append to the mutex-protected
	// shared list, which other Ps may steal from (see getSlow).
	if x != nil {
		l.Lock()
		l.shared = append(l.shared, x)
		l.Unlock()
	}
	if race.Enabled {
		race.Enable()
	}
}
   108	
// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
	if race.Enabled {
		race.Disable()
	}
	// Pin to our P; while pinned, the private slot can be read and
	// cleared without any locking.
	l := p.pin()
	x := l.private
	l.private = nil
	runtime_procUnpin()
	if x == nil {
		// Private slot was empty: pop from the tail of our own shared list.
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
		}
		l.Unlock()
		if x == nil {
			// Our shared list is empty too: try stealing from other Ps.
			x = p.getSlow()
		}
	}
	if race.Enabled {
		race.Enable()
		if x != nil {
			race.Acquire(poolRaceAddr(x))
		}
	}
	// Pool came up empty; fall back to the user-supplied constructor.
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}
   148	
// getSlow is the steal path of Get: after the caller's own poolLocal turned
// up empty, scan the other Ps' shared lists and pop one element if found.
// Returns nil if every shared list is empty.
func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	// Pin only long enough to learn our P's id; it seeds the scan so that
	// different Ps tend to start stealing from different victims.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		// Start at the P after ours and wrap around.
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}
	return x
}
   170	
// pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal {
	pid := runtime_procPin()
	// In pinSlow we store to localSize and then to local, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	// Slow path: the local array has not been allocated yet, or pid is
	// beyond it (e.g. GOMAXPROCS grew); pinSlow (re)allocates under a mutex.
	return p.pinSlow()
}
   186	
// pinSlow allocates (or re-allocates) the per-P poolLocal array.
// On entry the goroutine is pinned (by pin); it must unpin before taking
// allPoolsMu, then re-pin and re-check, since the world may have changed
// while it was unpinned. It returns with the goroutine pinned, matching
// pin's contract.
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	// Another goroutine may have allocated the array while we waited for the lock.
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	// First use (or first use since poolCleanup dropped it): register the
	// pool so poolCleanup will visit it at the next GC.
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	// Publish local before localSize so pin's opposite-order loads are safe.
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
	return &local[pid]
}
   210	
// poolCleanup drops every cached item from every registered Pool and
// unregisters them all. It is installed via runtime_registerPoolCleanup
// (see init below).
func poolCleanup() {
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.
	// Defensively zero out everything, 2 reasons:
	// 1. To prevent false retention of whole Pools.
	// 2. If GC happens while a goroutine works with l.shared in Put/Get,
	//    it will retain whole Pool. So next cycle memory consumption would be doubled.
	for i, p := range allPools {
		allPools[i] = nil
		for i := 0; i < int(p.localSize); i++ {
			l := indexLocal(p.local, i)
			l.private = nil
			// Zero each element so the backing array retains nothing even
			// if a pinned goroutine still holds a reference to it.
			for j := range l.shared {
				l.shared[j] = nil
			}
			l.shared = nil
		}
		p.local = nil
		p.localSize = 0
	}
	allPools = []*Pool{}
}
   233	
var (
	allPoolsMu Mutex   // guards allPools
	allPools   []*Pool // every Pool with a non-nil local array; emptied by poolCleanup each GC
)
   238	
// init hooks poolCleanup into the runtime so it runs at the start of
// every garbage collection (with the world stopped).
func init() {
	runtime_registerPoolCleanup(poolCleanup)
}
   242	
   243	func indexLocal(l unsafe.Pointer, i int) *poolLocal {
   244		return &(*[1000000]poolLocal)(l)[i]
   245	}
   246	
// Implemented in runtime.
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int // pins the goroutine to its P and disables preemption; returns the P's id
func runtime_procUnpin()   // undoes runtime_procPin
   251	

View as plain text