Source file src/runtime/map.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table. The data is arranged
    10  // into an array of buckets. Each bucket contains up to
    11  // 8 key/elem pairs. The low-order bits of the hash are
    12  // used to select a bucket. Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big. Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    28  // growing the table, iterators remain iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
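//
// Editor's sketch (not part of the original source): how a lookup uses the hash
// bits described above. The names below are illustrative only; the real code
// uses bucketMask and tophash further down in this file.
//
//	hash := alg.hash(key, seed)            // full hash of the key
//	bucket := hash & (uintptr(1)<<B - 1)   // low-order B bits pick the bucket
//	top := uint8(hash >> (ptrBits - 8))    // high-order byte becomes the tophash
//	// (top is bumped past the reserved marker values below minTopHash)
//
// Each of the 8 tophash bytes in a bucket is compared against top before the
// possibly expensive full key comparison is attempted.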
    32  
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space. I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and elems)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/elem pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows. Typical tables will be somewhat less loaded.
    55  
    56  import (
    57  	"runtime/internal/atomic"
    58  	"runtime/internal/math"
    59  	"runtime/internal/sys"
    60  	"unsafe"
    61  )
    62  
    63  const (
    64  	// Maximum number of key/elem pairs a bucket can hold.
    65  	bucketCntBits = 3
    66  	bucketCnt     = 1 << bucketCntBits
    67  
    68  	// Maximum average load of a bucket that triggers growth is 6.5.
     69  	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
    70  	loadFactorNum = 13
    71  	loadFactorDen = 2
    72  
    73  	// Maximum key or elem size to keep inline (instead of mallocing per element).
    74  	// Must fit in a uint8.
    75  	// Fast versions cannot handle big elems - the cutoff size for
     76  	// fast versions in cmd/compile/internal/gc/walk.go must be at most this size.
    77  	maxKeySize  = 128
    78  	maxElemSize = 128
    79  
    80  	// data offset should be the size of the bmap struct, but needs to be
    81  	// aligned correctly. For amd64p32 this means 64-bit alignment
    82  	// even though pointers are 32 bit.
    83  	dataOffset = unsafe.Offsetof(struct {
    84  		b bmap
    85  		v int64
    86  	}{}.v)
    87  
    88  	// Possible tophash values. We reserve a few possibilities for special marks.
    89  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    90  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    91  	// during map writes and thus no one else can observe the map during that time).
    92  	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
    93  	emptyOne       = 1 // this cell is empty
    94  	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
    95  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    96  	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
    97  	minTopHash     = 5 // minimum tophash for a normal filled cell.
    98  
    99  	// flags
   100  	iterator     = 1 // there may be an iterator using buckets
   101  	oldIterator  = 2 // there may be an iterator using oldbuckets
   102  	hashWriting  = 4 // a goroutine is writing to the map
   103  	sameSizeGrow = 8 // the current map growth is to a new map of the same size
   104  
   105  	// sentinel bucket ID for iterator checks
   106  	noCheck = 1<<(8*sys.PtrSize) - 1
   107  )
   108  
   109  // isEmpty reports whether the given tophash array entry represents an empty bucket entry.
   110  func isEmpty(x uint8) bool {
   111  	return x <= emptyOne
   112  }
   113  
   114  // A header for a Go map.
   115  type hmap struct {
   116  	// Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
   117  	// Make sure this stays in sync with the compiler's definition.
   118  	count     int // # live cells == size of map.  Must be first (used by len() builtin)
   119  	flags     uint8
   120  	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   121  	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
   122  	hash0     uint32 // hash seed
   123  
   124  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   125  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   126  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   127  
   128  	extra *mapextra // optional fields
   129  }
   130  
   131  // mapextra holds fields that are not present on all maps.
   132  type mapextra struct {
   133  	// If both key and elem do not contain pointers and are inline, then we mark bucket
   134  	// type as containing no pointers. This avoids scanning such maps.
   135  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
   136  	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
   137  	// overflow and oldoverflow are only used if key and elem do not contain pointers.
   138  	// overflow contains overflow buckets for hmap.buckets.
   139  	// oldoverflow contains overflow buckets for hmap.oldbuckets.
    140  	// The indirection allows us to store a pointer to the slice in hiter.
   141  	overflow    *[]*bmap
   142  	oldoverflow *[]*bmap
   143  
   144  	// nextOverflow holds a pointer to a free overflow bucket.
   145  	nextOverflow *bmap
   146  }
   147  
   148  // A bucket for a Go map.
   149  type bmap struct {
   150  	// tophash generally contains the top byte of the hash value
   151  	// for each key in this bucket. If tophash[0] < minTopHash,
   152  	// tophash[0] is a bucket evacuation state instead.
   153  	tophash [bucketCnt]uint8
   154  	// Followed by bucketCnt keys and then bucketCnt elems.
   155  	// NOTE: packing all the keys together and then all the elems together makes the
   156  	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
   157  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   158  	// Followed by an overflow pointer.
   159  }
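// Editor's note (illustrative, not part of the original source): for a
// map[int64]int8 bucket, the keys-then-elems layout described above is, in
// memory,
//
//	tophash  [8]uint8  // 8 bytes
//	keys     [8]int64  // 64 bytes
//	elems    [8]int8   // 8 bytes
//	overflow *bmap     // one trailing pointer
//
// whereas alternating key/elem pairs would need 7 bytes of padding after every
// int8 elem to keep the following int64 key aligned.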
   160  
   161  // A hash iteration structure.
   162  // If you modify hiter, also change cmd/compile/internal/gc/reflect.go to indicate
   163  // the layout of this structure.
   164  type hiter struct {
   165  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
   166  	elem        unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
   167  	t           *maptype
   168  	h           *hmap
   169  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   170  	bptr        *bmap          // current bucket
   171  	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
   172  	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
   173  	startBucket uintptr        // bucket iteration started at
   174  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   175  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   176  	B           uint8
   177  	i           uint8
   178  	bucket      uintptr
   179  	checkBucket uintptr
   180  }
   181  
   182  // bucketShift returns 1<<b, optimized for code generation.
   183  func bucketShift(b uint8) uintptr {
   184  	// Masking the shift amount allows overflow checks to be elided.
   185  	return uintptr(1) << (b & (sys.PtrSize*8 - 1))
   186  }
   187  
   188  // bucketMask returns 1<<b - 1, optimized for code generation.
   189  func bucketMask(b uint8) uintptr {
   190  	return bucketShift(b) - 1
   191  }
   192  
   193  // tophash calculates the tophash value for hash.
   194  func tophash(hash uintptr) uint8 {
   195  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   196  	if top < minTopHash {
   197  		top += minTopHash
   198  	}
   199  	return top
   200  }
   201  
   202  func evacuated(b *bmap) bool {
   203  	h := b.tophash[0]
   204  	return h > emptyOne && h < minTopHash
   205  }
   206  
   207  func (b *bmap) overflow(t *maptype) *bmap {
   208  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
   209  }
   210  
   211  func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
   212  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
   213  }
   214  
   215  func (b *bmap) keys() unsafe.Pointer {
   216  	return add(unsafe.Pointer(b), dataOffset)
   217  }
   218  
   219  // incrnoverflow increments h.noverflow.
   220  // noverflow counts the number of overflow buckets.
   221  // This is used to trigger same-size map growth.
   222  // See also tooManyOverflowBuckets.
   223  // To keep hmap small, noverflow is a uint16.
   224  // When there are few buckets, noverflow is an exact count.
   225  // When there are many buckets, noverflow is an approximate count.
   226  func (h *hmap) incrnoverflow() {
   227  	// We trigger same-size map growth if there are
   228  	// as many overflow buckets as buckets.
   229  	// We need to be able to count to 1<<h.B.
   230  	if h.B < 16 {
   231  		h.noverflow++
   232  		return
   233  	}
   234  	// Increment with probability 1/(1<<(h.B-15)).
   235  	// When we reach 1<<15 - 1, we will have approximately
   236  	// as many overflow buckets as buckets.
   237  	mask := uint32(1)<<(h.B-15) - 1
   238  	// Example: if h.B == 18, then mask == 7,
   239  	// and fastrand & 7 == 0 with probability 1/8.
   240  	if fastrand()&mask == 0 {
   241  		h.noverflow++
   242  	}
   243  }
   244  
   245  func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
   246  	var ovf *bmap
   247  	if h.extra != nil && h.extra.nextOverflow != nil {
   248  		// We have preallocated overflow buckets available.
   249  		// See makeBucketArray for more details.
   250  		ovf = h.extra.nextOverflow
   251  		if ovf.overflow(t) == nil {
   252  			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
   253  			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
   254  		} else {
   255  			// This is the last preallocated overflow bucket.
   256  			// Reset the overflow pointer on this bucket,
   257  			// which was set to a non-nil sentinel value.
   258  			ovf.setoverflow(t, nil)
   259  			h.extra.nextOverflow = nil
   260  		}
   261  	} else {
   262  		ovf = (*bmap)(newobject(t.bucket))
   263  	}
   264  	h.incrnoverflow()
   265  	if t.bucket.ptrdata == 0 {
   266  		h.createOverflow()
   267  		*h.extra.overflow = append(*h.extra.overflow, ovf)
   268  	}
   269  	b.setoverflow(t, ovf)
   270  	return ovf
   271  }
   272  
   273  func (h *hmap) createOverflow() {
   274  	if h.extra == nil {
   275  		h.extra = new(mapextra)
   276  	}
   277  	if h.extra.overflow == nil {
   278  		h.extra.overflow = new([]*bmap)
   279  	}
   280  }
   281  
   282  func makemap64(t *maptype, hint int64, h *hmap) *hmap {
   283  	if int64(int(hint)) != hint {
   284  		hint = 0
   285  	}
   286  	return makemap(t, int(hint), h)
   287  }
   288  
   289  // makemap_small implements Go map creation for make(map[k]v) and
   290  // make(map[k]v, hint) when hint is known to be at most bucketCnt
   291  // at compile time and the map needs to be allocated on the heap.
   292  func makemap_small() *hmap {
   293  	h := new(hmap)
   294  	h.hash0 = fastrand()
   295  	return h
   296  }
   297  
   298  // makemap implements Go map creation for make(map[k]v, hint).
   299  // If the compiler has determined that the map or the first bucket
   300  // can be created on the stack, h and/or bucket may be non-nil.
   301  // If h != nil, the map can be created directly in h.
   302  // If h.buckets != nil, bucket pointed to can be used as the first bucket.
   303  func makemap(t *maptype, hint int, h *hmap) *hmap {
   304  	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
   305  	if overflow || mem > maxAlloc {
   306  		hint = 0
   307  	}
   308  
   309  	// initialize Hmap
   310  	if h == nil {
   311  		h = new(hmap)
   312  	}
   313  	h.hash0 = fastrand()
   314  
   315  	// Find the size parameter B which will hold the requested # of elements.
   316  	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
   317  	B := uint8(0)
   318  	for overLoadFactor(hint, B) {
   319  		B++
   320  	}
   321  	h.B = B
   322  
   323  	// allocate initial hash table
   324  	// if B == 0, the buckets field is allocated lazily later (in mapassign)
   325  	// If hint is large zeroing this memory could take a while.
   326  	if h.B != 0 {
   327  		var nextOverflow *bmap
   328  		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
   329  		if nextOverflow != nil {
   330  			h.extra = new(mapextra)
   331  			h.extra.nextOverflow = nextOverflow
   332  		}
   333  	}
   334  
   335  	return h
   336  }
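// Editor's sketch (not part of the original source): how B is chosen for
// make(map[string]int, 100). overLoadFactor keeps returning true until 2^B
// buckets can hold 100 entries at a load factor of 6.5:
//
//	B=0: 100 > 6.5*1          -> grow
//	B=1: 100 > 6.5*2          -> grow
//	B=2: 100 > 6.5*4          -> grow
//	B=3: 100 > 6.5*8          -> grow
//	B=4: 100 > 6.5*16 == 104 is false -> stop
//
// so the map starts with 2^4 = 16 buckets (128 slots, of which up to 104 may
// be used before the next growth).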
   337  
   338  // makeBucketArray initializes a backing array for map buckets.
   339  // 1<<b is the minimum number of buckets to allocate.
   340  // dirtyalloc should either be nil or a bucket array previously
   341  // allocated by makeBucketArray with the same t and b parameters.
    342  // If dirtyalloc is nil, a new backing array will be alloced; otherwise
    343  // dirtyalloc will be cleared and reused as the backing array.
   344  func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
   345  	base := bucketShift(b)
   346  	nbuckets := base
   347  	// For small b, overflow buckets are unlikely.
   348  	// Avoid the overhead of the calculation.
   349  	if b >= 4 {
   350  		// Add on the estimated number of overflow buckets
   351  		// required to insert the median number of elements
   352  		// used with this value of b.
   353  		nbuckets += bucketShift(b - 4)
   354  		sz := t.bucket.size * nbuckets
   355  		up := roundupsize(sz)
   356  		if up != sz {
   357  			nbuckets = up / t.bucket.size
   358  		}
   359  	}
   360  
   361  	if dirtyalloc == nil {
   362  		buckets = newarray(t.bucket, int(nbuckets))
   363  	} else {
   364  		// dirtyalloc was previously generated by
   365  		// the above newarray(t.bucket, int(nbuckets))
   366  		// but may not be empty.
   367  		buckets = dirtyalloc
   368  		size := t.bucket.size * nbuckets
   369  		if t.bucket.ptrdata != 0 {
   370  			memclrHasPointers(buckets, size)
   371  		} else {
   372  			memclrNoHeapPointers(buckets, size)
   373  		}
   374  	}
   375  
   376  	if base != nbuckets {
   377  		// We preallocated some overflow buckets.
   378  		// To keep the overhead of tracking these overflow buckets to a minimum,
   379  		// we use the convention that if a preallocated overflow bucket's overflow
   380  		// pointer is nil, then there are more available by bumping the pointer.
   381  		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
   382  		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
   383  		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
   384  		last.setoverflow(t, (*bmap)(buckets))
   385  	}
   386  	return buckets, nextOverflow
   387  }
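// Editor's note (illustrative, not part of the original source): for b = 5 the
// estimate above is base = 32 buckets plus bucketShift(5-4) = 2 preallocated
// overflow buckets, so nbuckets starts at 34; roundupsize may then raise it a
// little further so the allocation fills out its malloc size class. The last
// preallocated overflow bucket gets its overflow pointer set to the (non-nil)
// start of the array, which newoverflow uses to detect the end of the
// preallocated run.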
   388  
   389  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
   390  // it will return a reference to the zero object for the elem type if
   391  // the key is not in the map.
   392  // NOTE: The returned pointer may keep the whole map live, so don't
   393  // hold onto it for very long.
   394  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   395  	if raceenabled && h != nil {
   396  		callerpc := getcallerpc()
   397  		pc := funcPC(mapaccess1)
   398  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   399  		raceReadObjectPC(t.key, key, callerpc, pc)
   400  	}
   401  	if msanenabled && h != nil {
   402  		msanread(key, t.key.size)
   403  	}
   404  	if h == nil || h.count == 0 {
   405  		if t.hashMightPanic() {
   406  			t.key.alg.hash(key, 0) // see issue 23734
   407  		}
   408  		return unsafe.Pointer(&zeroVal[0])
   409  	}
   410  	if h.flags&hashWriting != 0 {
   411  		throw("concurrent map read and map write")
   412  	}
   413  	alg := t.key.alg
   414  	hash := alg.hash(key, uintptr(h.hash0))
   415  	m := bucketMask(h.B)
   416  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   417  	if c := h.oldbuckets; c != nil {
   418  		if !h.sameSizeGrow() {
   419  			// There used to be half as many buckets; mask down one more power of two.
   420  			m >>= 1
   421  		}
   422  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
   423  		if !evacuated(oldb) {
   424  			b = oldb
   425  		}
   426  	}
   427  	top := tophash(hash)
   428  bucketloop:
   429  	for ; b != nil; b = b.overflow(t) {
   430  		for i := uintptr(0); i < bucketCnt; i++ {
   431  			if b.tophash[i] != top {
   432  				if b.tophash[i] == emptyRest {
   433  					break bucketloop
   434  				}
   435  				continue
   436  			}
   437  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   438  			if t.indirectkey() {
   439  				k = *((*unsafe.Pointer)(k))
   440  			}
   441  			if alg.equal(key, k) {
   442  				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
   443  				if t.indirectelem() {
   444  					e = *((*unsafe.Pointer)(e))
   445  				}
   446  				return e
   447  			}
   448  		}
   449  	}
   450  	return unsafe.Pointer(&zeroVal[0])
   451  }
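// Editor's note (illustrative, not part of the original source): the compiler
// lowers ordinary map reads to the accessors in this file, roughly:
//
//	v := m[k]      // mapaccess1 (or a key-type-specialized mapaccess1_fast* variant)
//	v, ok := m[k]  // mapaccess2 (or mapaccess2_fast*)
//
// Both return a pointer into the bucket, or &zeroVal[0] on a miss, which the
// generated code then copies or dereferences.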
   452  
   453  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   454  	if raceenabled && h != nil {
   455  		callerpc := getcallerpc()
   456  		pc := funcPC(mapaccess2)
   457  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   458  		raceReadObjectPC(t.key, key, callerpc, pc)
   459  	}
   460  	if msanenabled && h != nil {
   461  		msanread(key, t.key.size)
   462  	}
   463  	if h == nil || h.count == 0 {
   464  		if t.hashMightPanic() {
   465  			t.key.alg.hash(key, 0) // see issue 23734
   466  		}
   467  		return unsafe.Pointer(&zeroVal[0]), false
   468  	}
   469  	if h.flags&hashWriting != 0 {
   470  		throw("concurrent map read and map write")
   471  	}
   472  	alg := t.key.alg
   473  	hash := alg.hash(key, uintptr(h.hash0))
   474  	m := bucketMask(h.B)
   475  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   476  	if c := h.oldbuckets; c != nil {
   477  		if !h.sameSizeGrow() {
   478  			// There used to be half as many buckets; mask down one more power of two.
   479  			m >>= 1
   480  		}
   481  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
   482  		if !evacuated(oldb) {
   483  			b = oldb
   484  		}
   485  	}
   486  	top := tophash(hash)
   487  bucketloop:
   488  	for ; b != nil; b = b.overflow(t) {
   489  		for i := uintptr(0); i < bucketCnt; i++ {
   490  			if b.tophash[i] != top {
   491  				if b.tophash[i] == emptyRest {
   492  					break bucketloop
   493  				}
   494  				continue
   495  			}
   496  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   497  			if t.indirectkey() {
   498  				k = *((*unsafe.Pointer)(k))
   499  			}
   500  			if alg.equal(key, k) {
   501  				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
   502  				if t.indirectelem() {
   503  					e = *((*unsafe.Pointer)(e))
   504  				}
   505  				return e, true
   506  			}
   507  		}
   508  	}
   509  	return unsafe.Pointer(&zeroVal[0]), false
   510  }
   511  
    512  // mapaccessK returns both key and elem. Used by the map iterator.
   513  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   514  	if h == nil || h.count == 0 {
   515  		return nil, nil
   516  	}
   517  	alg := t.key.alg
   518  	hash := alg.hash(key, uintptr(h.hash0))
   519  	m := bucketMask(h.B)
   520  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   521  	if c := h.oldbuckets; c != nil {
   522  		if !h.sameSizeGrow() {
   523  			// There used to be half as many buckets; mask down one more power of two.
   524  			m >>= 1
   525  		}
   526  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
   527  		if !evacuated(oldb) {
   528  			b = oldb
   529  		}
   530  	}
   531  	top := tophash(hash)
   532  bucketloop:
   533  	for ; b != nil; b = b.overflow(t) {
   534  		for i := uintptr(0); i < bucketCnt; i++ {
   535  			if b.tophash[i] != top {
   536  				if b.tophash[i] == emptyRest {
   537  					break bucketloop
   538  				}
   539  				continue
   540  			}
   541  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   542  			if t.indirectkey() {
   543  				k = *((*unsafe.Pointer)(k))
   544  			}
   545  			if alg.equal(key, k) {
   546  				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
   547  				if t.indirectelem() {
   548  					e = *((*unsafe.Pointer)(e))
   549  				}
   550  				return k, e
   551  			}
   552  		}
   553  	}
   554  	return nil, nil
   555  }
   556  
   557  func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
   558  	e := mapaccess1(t, h, key)
   559  	if e == unsafe.Pointer(&zeroVal[0]) {
   560  		return zero
   561  	}
   562  	return e
   563  }
   564  
   565  func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
   566  	e := mapaccess1(t, h, key)
   567  	if e == unsafe.Pointer(&zeroVal[0]) {
   568  		return zero, false
   569  	}
   570  	return e, true
   571  }
   572  
   573  // Like mapaccess, but allocates a slot for the key if it is not present in the map.
   574  func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   575  	if h == nil {
   576  		panic(plainError("assignment to entry in nil map"))
   577  	}
   578  	if raceenabled {
   579  		callerpc := getcallerpc()
   580  		pc := funcPC(mapassign)
   581  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   582  		raceReadObjectPC(t.key, key, callerpc, pc)
   583  	}
   584  	if msanenabled {
   585  		msanread(key, t.key.size)
   586  	}
   587  	if h.flags&hashWriting != 0 {
   588  		throw("concurrent map writes")
   589  	}
   590  	alg := t.key.alg
   591  	hash := alg.hash(key, uintptr(h.hash0))
   592  
   593  	// Set hashWriting after calling alg.hash, since alg.hash may panic,
   594  	// in which case we have not actually done a write.
   595  	h.flags ^= hashWriting
   596  
   597  	if h.buckets == nil {
   598  		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
   599  	}
   600  
   601  again:
   602  	bucket := hash & bucketMask(h.B)
   603  	if h.growing() {
   604  		growWork(t, h, bucket)
   605  	}
   606  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   607  	top := tophash(hash)
   608  
   609  	var inserti *uint8
   610  	var insertk unsafe.Pointer
   611  	var elem unsafe.Pointer
   612  bucketloop:
   613  	for {
   614  		for i := uintptr(0); i < bucketCnt; i++ {
   615  			if b.tophash[i] != top {
   616  				if isEmpty(b.tophash[i]) && inserti == nil {
   617  					inserti = &b.tophash[i]
   618  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   619  					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
   620  				}
   621  				if b.tophash[i] == emptyRest {
   622  					break bucketloop
   623  				}
   624  				continue
   625  			}
   626  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   627  			if t.indirectkey() {
   628  				k = *((*unsafe.Pointer)(k))
   629  			}
   630  			if !alg.equal(key, k) {
   631  				continue
   632  			}
   633  			// already have a mapping for key. Update it.
   634  			if t.needkeyupdate() {
   635  				typedmemmove(t.key, k, key)
   636  			}
   637  			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
   638  			goto done
   639  		}
   640  		ovf := b.overflow(t)
   641  		if ovf == nil {
   642  			break
   643  		}
   644  		b = ovf
   645  	}
   646  
   647  	// Did not find mapping for key. Allocate new cell & add entry.
   648  
   649  	// If we hit the max load factor or we have too many overflow buckets,
   650  	// and we're not already in the middle of growing, start growing.
   651  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   652  		hashGrow(t, h)
   653  		goto again // Growing the table invalidates everything, so try again
   654  	}
   655  
   656  	if inserti == nil {
   657  		// all current buckets are full, allocate a new one.
   658  		newb := h.newoverflow(t, b)
   659  		inserti = &newb.tophash[0]
   660  		insertk = add(unsafe.Pointer(newb), dataOffset)
   661  		elem = add(insertk, bucketCnt*uintptr(t.keysize))
   662  	}
   663  
   664  	// store new key/elem at insert position
   665  	if t.indirectkey() {
   666  		kmem := newobject(t.key)
   667  		*(*unsafe.Pointer)(insertk) = kmem
   668  		insertk = kmem
   669  	}
   670  	if t.indirectelem() {
   671  		vmem := newobject(t.elem)
   672  		*(*unsafe.Pointer)(elem) = vmem
   673  	}
   674  	typedmemmove(t.key, insertk, key)
   675  	*inserti = top
   676  	h.count++
   677  
   678  done:
   679  	if h.flags&hashWriting == 0 {
   680  		throw("concurrent map writes")
   681  	}
   682  	h.flags &^= hashWriting
   683  	if t.indirectelem() {
   684  		elem = *((*unsafe.Pointer)(elem))
   685  	}
   686  	return elem
   687  }
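// Editor's note (illustrative, not part of the original source): the compiler
// lowers m[k] = v to a call to mapassign (or a mapassign_fast* variant) and
// then stores v through the returned elem pointer, roughly:
//
//	p := mapassign(maptype, m, &k)
//	*(*V)(p) = v
//
// which is why mapassign only reserves the slot and writes the key, never the
// element itself.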
   688  
   689  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   690  	if raceenabled && h != nil {
   691  		callerpc := getcallerpc()
   692  		pc := funcPC(mapdelete)
   693  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   694  		raceReadObjectPC(t.key, key, callerpc, pc)
   695  	}
   696  	if msanenabled && h != nil {
   697  		msanread(key, t.key.size)
   698  	}
   699  	if h == nil || h.count == 0 {
   700  		if t.hashMightPanic() {
   701  			t.key.alg.hash(key, 0) // see issue 23734
   702  		}
   703  		return
   704  	}
   705  	if h.flags&hashWriting != 0 {
   706  		throw("concurrent map writes")
   707  	}
   708  
   709  	alg := t.key.alg
   710  	hash := alg.hash(key, uintptr(h.hash0))
   711  
   712  	// Set hashWriting after calling alg.hash, since alg.hash may panic,
   713  	// in which case we have not actually done a write (delete).
   714  	h.flags ^= hashWriting
   715  
   716  	bucket := hash & bucketMask(h.B)
   717  	if h.growing() {
   718  		growWork(t, h, bucket)
   719  	}
   720  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
   721  	bOrig := b
   722  	top := tophash(hash)
   723  search:
   724  	for ; b != nil; b = b.overflow(t) {
   725  		for i := uintptr(0); i < bucketCnt; i++ {
   726  			if b.tophash[i] != top {
   727  				if b.tophash[i] == emptyRest {
   728  					break search
   729  				}
   730  				continue
   731  			}
   732  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   733  			k2 := k
   734  			if t.indirectkey() {
   735  				k2 = *((*unsafe.Pointer)(k2))
   736  			}
   737  			if !alg.equal(key, k2) {
   738  				continue
   739  			}
   740  			// Only clear key if there are pointers in it.
   741  			if t.indirectkey() {
   742  				*(*unsafe.Pointer)(k) = nil
   743  			} else if t.key.ptrdata != 0 {
   744  				memclrHasPointers(k, t.key.size)
   745  			}
   746  			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
   747  			if t.indirectelem() {
   748  				*(*unsafe.Pointer)(e) = nil
   749  			} else if t.elem.ptrdata != 0 {
   750  				memclrHasPointers(e, t.elem.size)
   751  			} else {
   752  				memclrNoHeapPointers(e, t.elem.size)
   753  			}
   754  			b.tophash[i] = emptyOne
   755  			// If the bucket now ends in a bunch of emptyOne states,
   756  			// change those to emptyRest states.
   757  			// It would be nice to make this a separate function, but
   758  			// for loops are not currently inlineable.
   759  			if i == bucketCnt-1 {
   760  				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
   761  					goto notLast
   762  				}
   763  			} else {
   764  				if b.tophash[i+1] != emptyRest {
   765  					goto notLast
   766  				}
   767  			}
   768  			for {
   769  				b.tophash[i] = emptyRest
   770  				if i == 0 {
   771  					if b == bOrig {
   772  						break // beginning of initial bucket, we're done.
   773  					}
   774  					// Find previous bucket, continue at its last entry.
   775  					c := b
   776  					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
   777  					}
   778  					i = bucketCnt - 1
   779  				} else {
   780  					i--
   781  				}
   782  				if b.tophash[i] != emptyOne {
   783  					break
   784  				}
   785  			}
   786  		notLast:
   787  			h.count--
   788  			break search
   789  		}
   790  	}
   791  
   792  	if h.flags&hashWriting == 0 {
   793  		throw("concurrent map writes")
   794  	}
   795  	h.flags &^= hashWriting
   796  }
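// Editor's sketch (not part of the original source): the emptyOne/emptyRest
// cleanup above. Suppose a bucket's tophash is
//
//	[h0 h1 e1 e1 X eR eR eR]   // e1 = emptyOne, eR = emptyRest
//
// and X (index 4) is deleted. The cell after it is already emptyRest, so the
// backward walk rewrites cells 4, 3, and 2 to emptyRest and stops at h1,
// letting future lookups and insertions stop scanning as early as possible.
// The walk can also continue into the preceding bucket of an overflow chain.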
   797  
   798  // mapiterinit initializes the hiter struct used for ranging over maps.
   799  // The hiter struct pointed to by 'it' is allocated on the stack
    800  // by the compiler's order pass or on the heap by reflect_mapiterinit.
    801  // Both need to have a zeroed hiter since the struct contains pointers.
   802  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   803  	if raceenabled && h != nil {
   804  		callerpc := getcallerpc()
   805  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
   806  	}
   807  
   808  	if h == nil || h.count == 0 {
   809  		return
   810  	}
   811  
   812  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
   813  		throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
   814  	}
   815  	it.t = t
   816  	it.h = h
   817  
   818  	// grab snapshot of bucket state
   819  	it.B = h.B
   820  	it.buckets = h.buckets
   821  	if t.bucket.ptrdata == 0 {
   822  		// Allocate the current slice and remember pointers to both current and old.
    823  		// This keeps all relevant overflow buckets alive even if
   824  		// the table grows and/or overflow buckets are added to the table
   825  		// while we are iterating.
   826  		h.createOverflow()
   827  		it.overflow = h.extra.overflow
   828  		it.oldoverflow = h.extra.oldoverflow
   829  	}
   830  
   831  	// decide where to start
   832  	r := uintptr(fastrand())
   833  	if h.B > 31-bucketCntBits {
   834  		r += uintptr(fastrand()) << 31
   835  	}
   836  	it.startBucket = r & bucketMask(h.B)
   837  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
   838  
   839  	// iterator state
   840  	it.bucket = it.startBucket
   841  
   842  	// Remember we have an iterator.
   843  	// Can run concurrently with another mapiterinit().
   844  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   845  		atomic.Or8(&h.flags, iterator|oldIterator)
   846  	}
   847  
   848  	mapiternext(it)
   849  }
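// Editor's note (illustrative, not part of the original source): a
// "for k, v := range m" loop is lowered to mapiterinit followed by repeated
// mapiternext calls, reading it.key/it.elem until it.key is nil. Because
// startBucket and offset are derived from fastrand() above, two iterations
// over the same unchanged map generally visit keys in different orders.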
   850  
   851  func mapiternext(it *hiter) {
   852  	h := it.h
   853  	if raceenabled {
   854  		callerpc := getcallerpc()
   855  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
   856  	}
   857  	if h.flags&hashWriting != 0 {
   858  		throw("concurrent map iteration and map write")
   859  	}
   860  	t := it.t
   861  	bucket := it.bucket
   862  	b := it.bptr
   863  	i := it.i
   864  	checkBucket := it.checkBucket
   865  	alg := t.key.alg
   866  
   867  next:
   868  	if b == nil {
   869  		if bucket == it.startBucket && it.wrapped {
   870  			// end of iteration
   871  			it.key = nil
   872  			it.elem = nil
   873  			return
   874  		}
   875  		if h.growing() && it.B == h.B {
   876  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   877  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   878  			// bucket hasn't been evacuated) then we need to iterate through the old
   879  			// bucket and only return the ones that will be migrated to this bucket.
   880  			oldbucket := bucket & it.h.oldbucketmask()
   881  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   882  			if !evacuated(b) {
   883  				checkBucket = bucket
   884  			} else {
   885  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   886  				checkBucket = noCheck
   887  			}
   888  		} else {
   889  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   890  			checkBucket = noCheck
   891  		}
   892  		bucket++
   893  		if bucket == bucketShift(it.B) {
   894  			bucket = 0
   895  			it.wrapped = true
   896  		}
   897  		i = 0
   898  	}
   899  	for ; i < bucketCnt; i++ {
   900  		offi := (i + it.offset) & (bucketCnt - 1)
   901  		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
   902  			// TODO: emptyRest is hard to use here, as we start iterating
   903  			// in the middle of a bucket. It's feasible, just tricky.
   904  			continue
   905  		}
   906  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
   907  		if t.indirectkey() {
   908  			k = *((*unsafe.Pointer)(k))
   909  		}
   910  		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
   911  		if checkBucket != noCheck && !h.sameSizeGrow() {
   912  			// Special case: iterator was started during a grow to a larger size
   913  			// and the grow is not done yet. We're working on a bucket whose
   914  			// oldbucket has not been evacuated yet. Or at least, it wasn't
   915  			// evacuated when we started the bucket. So we're iterating
   916  			// through the oldbucket, skipping any keys that will go
   917  			// to the other new bucket (each oldbucket expands to two
   918  			// buckets during a grow).
   919  			if t.reflexivekey() || alg.equal(k, k) {
   920  				// If the item in the oldbucket is not destined for
   921  				// the current new bucket in the iteration, skip it.
   922  				hash := alg.hash(k, uintptr(h.hash0))
   923  				if hash&bucketMask(it.B) != checkBucket {
   924  					continue
   925  				}
   926  			} else {
   927  				// Hash isn't repeatable if k != k (NaNs).  We need a
   928  				// repeatable and randomish choice of which direction
   929  				// to send NaNs during evacuation. We'll use the low
   930  				// bit of tophash to decide which way NaNs go.
   931  				// NOTE: this case is why we need two evacuate tophash
   932  				// values, evacuatedX and evacuatedY, that differ in
   933  				// their low bit.
   934  				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   935  					continue
   936  				}
   937  			}
   938  		}
   939  		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
   940  			!(t.reflexivekey() || alg.equal(k, k)) {
   941  			// This is the golden data, we can return it.
   942  			// OR
   943  			// key!=key, so the entry can't be deleted or updated, so we can just return it.
   944  			// That's lucky for us because when key!=key we can't look it up successfully.
   945  			it.key = k
   946  			if t.indirectelem() {
   947  				e = *((*unsafe.Pointer)(e))
   948  			}
   949  			it.elem = e
   950  		} else {
   951  			// The hash table has grown since the iterator was started.
   952  			// The golden data for this key is now somewhere else.
   953  			// Check the current hash table for the data.
   954  			// This code handles the case where the key
   955  			// has been deleted, updated, or deleted and reinserted.
   956  			// NOTE: we need to regrab the key as it has potentially been
   957  			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   958  			rk, re := mapaccessK(t, h, k)
   959  			if rk == nil {
   960  				continue // key has been deleted
   961  			}
   962  			it.key = rk
   963  			it.elem = re
   964  		}
   965  		it.bucket = bucket
   966  		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
   967  			it.bptr = b
   968  		}
   969  		it.i = i + 1
   970  		it.checkBucket = checkBucket
   971  		return
   972  	}
   973  	b = b.overflow(t)
   974  	i = 0
   975  	goto next
   976  }
   977  
   978  // mapclear deletes all keys from a map.
   979  func mapclear(t *maptype, h *hmap) {
   980  	if raceenabled && h != nil {
   981  		callerpc := getcallerpc()
   982  		pc := funcPC(mapclear)
   983  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   984  	}
   985  
   986  	if h == nil || h.count == 0 {
   987  		return
   988  	}
   989  
   990  	if h.flags&hashWriting != 0 {
   991  		throw("concurrent map writes")
   992  	}
   993  
   994  	h.flags ^= hashWriting
   995  
   996  	h.flags &^= sameSizeGrow
   997  	h.oldbuckets = nil
   998  	h.nevacuate = 0
   999  	h.noverflow = 0
  1000  	h.count = 0
  1001  
  1002  	// Keep the mapextra allocation but clear any extra information.
  1003  	if h.extra != nil {
  1004  		*h.extra = mapextra{}
  1005  	}
  1006  
  1007  	// makeBucketArray clears the memory pointed to by h.buckets
  1008  	// and recovers any overflow buckets by generating them
  1009  	// as if h.buckets was newly alloced.
  1010  	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
  1011  	if nextOverflow != nil {
  1012  		// If overflow buckets are created then h.extra
  1013  		// will have been allocated during initial bucket creation.
  1014  		h.extra.nextOverflow = nextOverflow
  1015  	}
  1016  
  1017  	if h.flags&hashWriting == 0 {
  1018  		throw("concurrent map writes")
  1019  	}
  1020  	h.flags &^= hashWriting
  1021  }
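// Editor's note (illustrative, not part of the original source): the compiler
// recognizes the idiom
//
//	for k := range m {
//		delete(m, k)
//	}
//
// and, when it is safe to do so, replaces the loop with a single mapclear
// call, which is much cheaper than deleting entries one at a time.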
  1022  
  1023  func hashGrow(t *maptype, h *hmap) {
  1024  	// If we've hit the load factor, get bigger.
  1025  	// Otherwise, there are too many overflow buckets,
  1026  	// so keep the same number of buckets and "grow" laterally.
  1027  	bigger := uint8(1)
  1028  	if !overLoadFactor(h.count+1, h.B) {
  1029  		bigger = 0
  1030  		h.flags |= sameSizeGrow
  1031  	}
  1032  	oldbuckets := h.buckets
  1033  	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
  1034  
  1035  	flags := h.flags &^ (iterator | oldIterator)
  1036  	if h.flags&iterator != 0 {
  1037  		flags |= oldIterator
  1038  	}
  1039  	// commit the grow (atomic wrt gc)
  1040  	h.B += bigger
  1041  	h.flags = flags
  1042  	h.oldbuckets = oldbuckets
  1043  	h.buckets = newbuckets
  1044  	h.nevacuate = 0
  1045  	h.noverflow = 0
  1046  
  1047  	if h.extra != nil && h.extra.overflow != nil {
  1048  		// Promote current overflow buckets to the old generation.
  1049  		if h.extra.oldoverflow != nil {
  1050  			throw("oldoverflow is not nil")
  1051  		}
  1052  		h.extra.oldoverflow = h.extra.overflow
  1053  		h.extra.overflow = nil
  1054  	}
  1055  	if nextOverflow != nil {
  1056  		if h.extra == nil {
  1057  			h.extra = new(mapextra)
  1058  		}
  1059  		h.extra.nextOverflow = nextOverflow
  1060  	}
  1061  
  1062  	// the actual copying of the hash table data is done incrementally
  1063  	// by growWork() and evacuate().
  1064  }
  1065  
  1066  // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
  1067  func overLoadFactor(count int, B uint8) bool {
  1068  	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
  1069  }
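// Editor's sketch (not part of the original source): overLoadFactor computes
// count > 6.5 * 2^B using only integer math, since
//
//	loadFactorNum*(bucketShift(B)/loadFactorDen) = 13*(2^B/2) = 6.5*2^B
//
// For example, with B = 5 (32 buckets, 256 slots) growth is triggered once
// count exceeds 13*16 = 208.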
  1070  
  1071  // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
  1072  // Note that most of these overflow buckets must be in sparse use;
  1073  // if use was dense, then we'd have already triggered regular map growth.
  1074  func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
  1075  	// If the threshold is too low, we do extraneous work.
  1076  	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
  1077  	// "too many" means (approximately) as many overflow buckets as regular buckets.
  1078  	// See incrnoverflow for more details.
  1079  	if B > 15 {
  1080  		B = 15
  1081  	}
  1082  	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
  1083  	return noverflow >= uint16(1)<<(B&15)
  1084  }
  1085  
  1086  // growing reports whether h is growing. The growth may be to the same size or bigger.
  1087  func (h *hmap) growing() bool {
  1088  	return h.oldbuckets != nil
  1089  }
  1090  
  1091  // sameSizeGrow reports whether the current growth is to a map of the same size.
  1092  func (h *hmap) sameSizeGrow() bool {
  1093  	return h.flags&sameSizeGrow != 0
  1094  }
  1095  
  1096  // noldbuckets calculates the number of buckets prior to the current map growth.
  1097  func (h *hmap) noldbuckets() uintptr {
  1098  	oldB := h.B
  1099  	if !h.sameSizeGrow() {
  1100  		oldB--
  1101  	}
  1102  	return bucketShift(oldB)
  1103  }
  1104  
  1105  // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
  1106  func (h *hmap) oldbucketmask() uintptr {
  1107  	return h.noldbuckets() - 1
  1108  }
  1109  
  1110  func growWork(t *maptype, h *hmap, bucket uintptr) {
  1111  	// make sure we evacuate the oldbucket corresponding
  1112  	// to the bucket we're about to use
  1113  	evacuate(t, h, bucket&h.oldbucketmask())
  1114  
  1115  	// evacuate one more oldbucket to make progress on growing
  1116  	if h.growing() {
  1117  		evacuate(t, h, h.nevacuate)
  1118  	}
  1119  }
  1120  
  1121  func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
  1122  	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
  1123  	return evacuated(b)
  1124  }
  1125  
  1126  // evacDst is an evacuation destination.
  1127  type evacDst struct {
  1128  	b *bmap          // current destination bucket
  1129  	i int            // key/elem index into b
  1130  	k unsafe.Pointer // pointer to current key storage
  1131  	e unsafe.Pointer // pointer to current elem storage
  1132  }
  1133  
  1134  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
  1135  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
  1136  	newbit := h.noldbuckets()
  1137  	if !evacuated(b) {
  1138  		// TODO: reuse overflow buckets instead of using new ones, if there
  1139  		// is no iterator using the old buckets.  (If !oldIterator.)
  1140  
  1141  		// xy contains the x and y (low and high) evacuation destinations.
  1142  		var xy [2]evacDst
  1143  		x := &xy[0]
  1144  		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
  1145  		x.k = add(unsafe.Pointer(x.b), dataOffset)
  1146  		x.e = add(x.k, bucketCnt*uintptr(t.keysize))
  1147  
  1148  		if !h.sameSizeGrow() {
  1149  			// Only calculate y pointers if we're growing bigger.
  1150  			// Otherwise GC can see bad pointers.
  1151  			y := &xy[1]
  1152  			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
  1153  			y.k = add(unsafe.Pointer(y.b), dataOffset)
  1154  			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
  1155  		}
  1156  
  1157  		for ; b != nil; b = b.overflow(t) {
  1158  			k := add(unsafe.Pointer(b), dataOffset)
  1159  			e := add(k, bucketCnt*uintptr(t.keysize))
  1160  			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
  1161  				top := b.tophash[i]
  1162  				if isEmpty(top) {
  1163  					b.tophash[i] = evacuatedEmpty
  1164  					continue
  1165  				}
  1166  				if top < minTopHash {
  1167  					throw("bad map state")
  1168  				}
  1169  				k2 := k
  1170  				if t.indirectkey() {
  1171  					k2 = *((*unsafe.Pointer)(k2))
  1172  				}
  1173  				var useY uint8
  1174  				if !h.sameSizeGrow() {
  1175  					// Compute hash to make our evacuation decision (whether we need
  1176  					// to send this key/elem to bucket x or bucket y).
  1177  					hash := t.key.alg.hash(k2, uintptr(h.hash0))
  1178  					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) {
  1179  						// If key != key (NaNs), then the hash could be (and probably
  1180  						// will be) entirely different from the old hash. Moreover,
  1181  						// it isn't reproducible. Reproducibility is required in the
  1182  						// presence of iterators, as our evacuation decision must
  1183  						// match whatever decision the iterator made.
  1184  						// Fortunately, we have the freedom to send these keys either
  1185  						// way. Also, tophash is meaningless for these kinds of keys.
  1186  						// We let the low bit of tophash drive the evacuation decision.
  1187  						// We recompute a new random tophash for the next level so
  1188  						// these keys will get evenly distributed across all buckets
  1189  						// after multiple grows.
  1190  						useY = top & 1
  1191  						top = tophash(hash)
  1192  					} else {
  1193  						if hash&newbit != 0 {
  1194  							useY = 1
  1195  						}
  1196  					}
  1197  				}
  1198  
  1199  				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
  1200  					throw("bad evacuatedN")
  1201  				}
  1202  
  1203  				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
  1204  				dst := &xy[useY]                 // evacuation destination
  1205  
  1206  				if dst.i == bucketCnt {
  1207  					dst.b = h.newoverflow(t, dst.b)
  1208  					dst.i = 0
  1209  					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
  1210  					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
  1211  				}
  1212  				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
  1213  				if t.indirectkey() {
  1214  					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
  1215  				} else {
   1216  					typedmemmove(t.key, dst.k, k) // copy key
  1217  				}
  1218  				if t.indirectelem() {
  1219  					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
  1220  				} else {
  1221  					typedmemmove(t.elem, dst.e, e)
  1222  				}
  1223  				dst.i++
  1224  				// These updates might push these pointers past the end of the
  1225  				// key or elem arrays.  That's ok, as we have the overflow pointer
  1226  				// at the end of the bucket to protect against pointing past the
  1227  				// end of the bucket.
  1228  				dst.k = add(dst.k, uintptr(t.keysize))
  1229  				dst.e = add(dst.e, uintptr(t.elemsize))
  1230  			}
  1231  		}
  1232  		// Unlink the overflow buckets & clear key/elem to help GC.
  1233  		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
  1234  			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
  1235  			// Preserve b.tophash because the evacuation
  1236  			// state is maintained there.
  1237  			ptr := add(b, dataOffset)
  1238  			n := uintptr(t.bucketsize) - dataOffset
  1239  			memclrHasPointers(ptr, n)
  1240  		}
  1241  	}
  1242  
  1243  	if oldbucket == h.nevacuate {
  1244  		advanceEvacuationMark(h, t, newbit)
  1245  	}
  1246  }
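// Editor's sketch (not part of the original source): evacuation targets during
// a doubling grow. With newbit = noldbuckets() = 8 (old B of 3), old bucket 5
// splits into new bucket 5 (the "x" destination) and new bucket 5+8 = 13 (the
// "y" destination); each key goes to x or y depending on whether hash&newbit
// is zero, i.e. on the newly significant bit of its bucket index. A same-size
// grow uses only the x destination.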
  1247  
  1248  func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
  1249  	h.nevacuate++
  1250  	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
  1251  	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
  1252  	stop := h.nevacuate + 1024
  1253  	if stop > newbit {
  1254  		stop = newbit
  1255  	}
  1256  	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
  1257  		h.nevacuate++
  1258  	}
  1259  	if h.nevacuate == newbit { // newbit == # of oldbuckets
  1260  		// Growing is all done. Free old main bucket array.
  1261  		h.oldbuckets = nil
  1262  		// Can discard old overflow buckets as well.
  1263  		// If they are still referenced by an iterator,
   1264  		// then the iterator holds a pointer to the slice.
  1265  		if h.extra != nil {
  1266  			h.extra.oldoverflow = nil
  1267  		}
  1268  		h.flags &^= sameSizeGrow
  1269  	}
  1270  }
  1271  
  1272  func ismapkey(t *_type) bool {
  1273  	return t.alg.hash != nil
  1274  }
  1275  
  1276  // Reflect stubs. Called from ../reflect/asm_*.s
  1277  
  1278  //go:linkname reflect_makemap reflect.makemap
  1279  func reflect_makemap(t *maptype, cap int) *hmap {
   1280  	// Check invariants and reflect's math.
  1281  	if !ismapkey(t.key) {
  1282  		throw("runtime.reflect_makemap: unsupported map key type")
  1283  	}
  1284  	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
  1285  		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
  1286  		throw("key size wrong")
  1287  	}
  1288  	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
  1289  		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
  1290  		throw("elem size wrong")
  1291  	}
  1292  	if t.key.align > bucketCnt {
  1293  		throw("key align too big")
  1294  	}
  1295  	if t.elem.align > bucketCnt {
  1296  		throw("elem align too big")
  1297  	}
  1298  	if t.key.size%uintptr(t.key.align) != 0 {
  1299  		throw("key size not a multiple of key align")
  1300  	}
  1301  	if t.elem.size%uintptr(t.elem.align) != 0 {
  1302  		throw("elem size not a multiple of elem align")
  1303  	}
  1304  	if bucketCnt < 8 {
  1305  		throw("bucketsize too small for proper alignment")
  1306  	}
  1307  	if dataOffset%uintptr(t.key.align) != 0 {
  1308  		throw("need padding in bucket (key)")
  1309  	}
  1310  	if dataOffset%uintptr(t.elem.align) != 0 {
  1311  		throw("need padding in bucket (elem)")
  1312  	}
  1313  
  1314  	return makemap(t, cap, nil)
  1315  }
  1316  
  1317  //go:linkname reflect_mapaccess reflect.mapaccess
  1318  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
  1319  	elem, ok := mapaccess2(t, h, key)
  1320  	if !ok {
  1321  		// reflect wants nil for a missing element
  1322  		elem = nil
  1323  	}
  1324  	return elem
  1325  }
  1326  
  1327  //go:linkname reflect_mapassign reflect.mapassign
  1328  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
  1329  	p := mapassign(t, h, key)
  1330  	typedmemmove(t.elem, p, elem)
  1331  }
  1332  
  1333  //go:linkname reflect_mapdelete reflect.mapdelete
  1334  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1335  	mapdelete(t, h, key)
  1336  }
  1337  
  1338  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1339  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
  1340  	it := new(hiter)
  1341  	mapiterinit(t, h, it)
  1342  	return it
  1343  }
  1344  
  1345  //go:linkname reflect_mapiternext reflect.mapiternext
  1346  func reflect_mapiternext(it *hiter) {
  1347  	mapiternext(it)
  1348  }
  1349  
  1350  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1351  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1352  	return it.key
  1353  }
  1354  
  1355  //go:linkname reflect_mapiterelem reflect.mapiterelem
  1356  func reflect_mapiterelem(it *hiter) unsafe.Pointer {
  1357  	return it.elem
  1358  }
  1359  
  1360  //go:linkname reflect_maplen reflect.maplen
  1361  func reflect_maplen(h *hmap) int {
  1362  	if h == nil {
  1363  		return 0
  1364  	}
  1365  	if raceenabled {
  1366  		callerpc := getcallerpc()
  1367  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1368  	}
  1369  	return h.count
  1370  }
  1371  
  1372  //go:linkname reflectlite_maplen internal/reflectlite.maplen
  1373  func reflectlite_maplen(h *hmap) int {
  1374  	if h == nil {
  1375  		return 0
  1376  	}
  1377  	if raceenabled {
  1378  		callerpc := getcallerpc()
  1379  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1380  	}
  1381  	return h.count
  1382  }
  1383  
  1384  //go:linkname reflect_ismapkey reflect.ismapkey
  1385  func reflect_ismapkey(t *_type) bool {
  1386  	return ismapkey(t)
  1387  }
  1388  
  1389  const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
  1390  var zeroVal [maxZero]byte
  1391  
