Source file src/runtime/map_fast64.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

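// mapaccess1_fast64 is the lookup fast path for maps whose keys are 64 bits
// wide (e.g. map[uint64]V, map[int64]V). Compiler-generated code calls it for
// plain one-result lookups; schematically, `v := m[k]` is lowered to roughly
//
//	v = *(*V)(mapaccess1_fast64(maptype, m, k))
//
// It returns a pointer to the element, or a pointer to the shared zero value
// if the key is absent; it never returns nil.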
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

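// mapaccess2_fast64 is the comma-ok variant of mapaccess1_fast64, backing
// `v, ok := m[k]`: it additionally reports whether the key was present.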
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

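// mapassign_fast64 is the assignment fast path for 64-bit keys. It finds or
// creates the slot for key and returns a pointer to the element; the caller
// (compiler-generated code) stores the value through that pointer, so
// `m[k] = v` becomes roughly
//
//	*(*V)(mapassign_fast64(maptype, m, k)) = v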
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

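// mapassign_fast64ptr is the same assignment fast path, specialized for
// pointer-shaped keys that are 8 bytes wide (e.g. map[*T]V on 64-bit
// platforms), so the key store below is a pointer write and gets a write
// barrier, unlike the raw uint64 store in mapassign_fast64.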
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

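// mapdelete_fast64 implements delete(m, key) for maps with 64-bit keys.
// Besides clearing the key/elem slot, it back-propagates emptyRest marks
// over trailing emptyOne slots so later lookups can stop scanning early.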
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.PtrBytes != 0 {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

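// growWork_fast64 does a bounded amount of incremental grow work: it
// evacuates the old bucket backing the bucket about to be used, plus at
// most one more, so a grow completes gradually as the map is written to.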
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

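// evacuate_fast64 moves every entry of old bucket oldbucket (including its
// overflow chain) into the new bucket array. When the table is doubling,
// each key is rehashed to pick the "x" (same index) or "y" (index+newbit)
// destination; on a same-size grow everything stays in x.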
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.PtrBytes != 0 && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
