Source file src/runtime/map_fast64.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/sys"
     9  	"unsafe"
    10  )
    11  
    12  func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
    13  	if raceenabled && h != nil {
    14  		callerpc := getcallerpc()
    15  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
    16  	}
    17  	if h == nil || h.count == 0 {
    18  		return unsafe.Pointer(&zeroVal[0])
    19  	}
    20  	if h.flags&hashWriting != 0 {
    21  		throw("concurrent map read and map write")
    22  	}
    23  	var b *bmap
    24  	if h.B == 0 {
    25  		// One-bucket table. No need to hash.
    26  		b = (*bmap)(h.buckets)
    27  	} else {
    28  		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
    29  		m := bucketMask(h.B)
    30  		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
    31  		if c := h.oldbuckets; c != nil {
    32  			if !h.sameSizeGrow() {
    33  				// There used to be half as many buckets; mask down one more power of two.
    34  				m >>= 1
    35  			}
    36  			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
    37  			if !evacuated(oldb) {
    38  				b = oldb
    39  			}
    40  		}
    41  	}
    42  	for ; b != nil; b = b.overflow(t) {
    43  		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
    44  			if *(*uint64)(k) == key && b.tophash[i] != empty {
    45  				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
    46  			}
    47  		}
    48  	}
    49  	return unsafe.Pointer(&zeroVal[0])
    50  }
    51  
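The compiler lowers a single-result lookup on a map whose key is an 8-byte integer (for example map[uint64]V or map[int64]V) to a call to mapaccess1_fast64 above. A minimal, hypothetical demo of Go code that takes this fast path:

	package main

	// Hypothetical demo program; not part of map_fast64.go.
	func main() {
		counts := map[uint64]int{7: 1, 42: 2}
		n := counts[42] // this lookup is compiled to a runtime.mapaccess1_fast64 call
		println(n)      // prints 2
	}
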
    52  func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
    53  	if raceenabled && h != nil {
    54  		callerpc := getcallerpc()
    55  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
    56  	}
    57  	if h == nil || h.count == 0 {
    58  		return unsafe.Pointer(&zeroVal[0]), false
    59  	}
    60  	if h.flags&hashWriting != 0 {
    61  		throw("concurrent map read and map write")
    62  	}
    63  	var b *bmap
    64  	if h.B == 0 {
    65  		// One-bucket table. No need to hash.
    66  		b = (*bmap)(h.buckets)
    67  	} else {
    68  		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
    69  		m := bucketMask(h.B)
    70  		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
    71  		if c := h.oldbuckets; c != nil {
    72  			if !h.sameSizeGrow() {
    73  				// There used to be half as many buckets; mask down one more power of two.
    74  				m >>= 1
    75  			}
    76  			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
    77  			if !evacuated(oldb) {
    78  				b = oldb
    79  			}
    80  		}
    81  	}
    82  	for ; b != nil; b = b.overflow(t) {
    83  		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
    84  			if *(*uint64)(k) == key && b.tophash[i] != empty {
    85  				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
    86  			}
    87  		}
    88  	}
    89  	return unsafe.Pointer(&zeroVal[0]), false
    90  }
    91  
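The two-result (comma-ok) form of the same lookup goes through mapaccess2_fast64, which additionally reports whether the key was present. A small hypothetical example:

	package main

	// Hypothetical demo program; not part of map_fast64.go.
	func main() {
		seen := map[int64]bool{3: true}
		v, ok := seen[4] // comma-ok lookups with 64-bit keys call runtime.mapaccess2_fast64
		println(v, ok)   // prints false false
	}
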
    92  func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
    93  	if h == nil {
    94  		panic(plainError("assignment to entry in nil map"))
    95  	}
    96  	if raceenabled {
    97  		callerpc := getcallerpc()
    98  		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
    99  	}
   100  	if h.flags&hashWriting != 0 {
   101  		throw("concurrent map writes")
   102  	}
   103  	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
   104  
   105  	// Set hashWriting after calling alg.hash for consistency with mapassign.
   106  	h.flags |= hashWriting
   107  
   108  	if h.buckets == nil {
   109  		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
   110  	}
   111  
   112  again:
   113  	bucket := hash & bucketMask(h.B)
   114  	if h.growing() {
   115  		growWork_fast64(t, h, bucket)
   116  	}
   117  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   118  
   119  	var insertb *bmap
   120  	var inserti uintptr
   121  	var insertk unsafe.Pointer
   122  
   123  	for {
   124  		for i := uintptr(0); i < bucketCnt; i++ {
   125  			if b.tophash[i] == empty {
   126  				if insertb == nil {
   127  					insertb = b
   128  					inserti = i
   129  				}
   130  				continue
   131  			}
   132  			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
   133  			if k != key {
   134  				continue
   135  			}
   136  			insertb = b
   137  			inserti = i
   138  			goto done
   139  		}
   140  		ovf := b.overflow(t)
   141  		if ovf == nil {
   142  			break
   143  		}
   144  		b = ovf
   145  	}
   146  
   147  	// Did not find mapping for key. Allocate new cell & add entry.
   148  
   149  	// If we hit the max load factor or we have too many overflow buckets,
   150  	// and we're not already in the middle of growing, start growing.
   151  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   152  		hashGrow(t, h)
   153  		goto again // Growing the table invalidates everything, so try again
   154  	}
   155  
   156  	if insertb == nil {
   157  		// all current buckets are full, allocate a new one.
   158  		insertb = h.newoverflow(t, b)
   159  		inserti = 0 // not necessary, but avoids needlessly spilling inserti
   160  	}
   161  	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
   162  
   163  	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
   164  	// store new key at insert position
   165  	*(*uint64)(insertk) = key
   166  
   167  	h.count++
   168  
   169  done:
   170  	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
   171  	if h.flags&hashWriting == 0 {
   172  		throw("concurrent map writes")
   173  	}
   174  	h.flags &^= hashWriting
   175  	return val
   176  }
   177  
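mapassign_fast64 returns a pointer to the value slot for the key, inserting the key if needed; the compiler then stores the right-hand side through that pointer. A hedged sketch of source code that is lowered to this routine:

	package main

	// Hypothetical demo program; not part of map_fast64.go.
	func main() {
		m := make(map[uint64]string)
		m[1] = "one"  // roughly: p := runtime.mapassign_fast64(t, m, 1); *p = "one"
		println(m[1]) // prints one
	}
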
   178  func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   179  	if h == nil {
   180  		panic(plainError("assignment to entry in nil map"))
   181  	}
   182  	if raceenabled {
   183  		callerpc := getcallerpc()
   184  		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
   185  	}
   186  	if h.flags&hashWriting != 0 {
   187  		throw("concurrent map writes")
   188  	}
   189  	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
   190  
   191  	// Set hashWriting after calling alg.hash for consistency with mapassign.
   192  	h.flags |= hashWriting
   193  
   194  	if h.buckets == nil {
   195  		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
   196  	}
   197  
   198  again:
   199  	bucket := hash & bucketMask(h.B)
   200  	if h.growing() {
   201  		growWork_fast64(t, h, bucket)
   202  	}
   203  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   204  
   205  	var insertb *bmap
   206  	var inserti uintptr
   207  	var insertk unsafe.Pointer
   208  
   209  	for {
   210  		for i := uintptr(0); i < bucketCnt; i++ {
   211  			if b.tophash[i] == empty {
   212  				if insertb == nil {
   213  					insertb = b
   214  					inserti = i
   215  				}
   216  				continue
   217  			}
   218  			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
   219  			if k != key {
   220  				continue
   221  			}
   222  			insertb = b
   223  			inserti = i
   224  			goto done
   225  		}
   226  		ovf := b.overflow(t)
   227  		if ovf == nil {
   228  			break
   229  		}
   230  		b = ovf
   231  	}
   232  
   233  	// Did not find mapping for key. Allocate new cell & add entry.
   234  
   235  	// If we hit the max load factor or we have too many overflow buckets,
   236  	// and we're not already in the middle of growing, start growing.
   237  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   238  		hashGrow(t, h)
   239  		goto again // Growing the table invalidates everything, so try again
   240  	}
   241  
   242  	if insertb == nil {
   243  		// all current buckets are full, allocate a new one.
   244  		insertb = h.newoverflow(t, b)
   245  		inserti = 0 // not necessary, but avoids needlessly spilling inserti
   246  	}
   247  	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
   248  
   249  	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
   250  	// store new key at insert position
   251  	*(*unsafe.Pointer)(insertk) = key
   252  
   253  	h.count++
   254  
   255  done:
   256  	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
   257  	if h.flags&hashWriting == 0 {
   258  		throw("concurrent map writes")
   259  	}
   260  	h.flags &^= hashWriting
   261  	return val
   262  }
   263  
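mapassign_fast64ptr is the same algorithm with the key stored through an *unsafe.Pointer instead of a *uint64, so the garbage collector observes a pointer-typed write; the compiler is expected to select it when the 8-byte key is itself a pointer, which on 64-bit platforms includes maps keyed by pointers. A hypothetical example of code that would use this variant:

	package main

	// Hypothetical demo program; not part of map_fast64.go.
	type node struct{ id int }

	func main() {
		index := make(map[*node]int)
		n := &node{id: 1}
		index[n] = 7      // on 64-bit platforms this assignment uses runtime.mapassign_fast64ptr
		println(index[n]) // prints 7
	}
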
   264  func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
   265  	if raceenabled && h != nil {
   266  		callerpc := getcallerpc()
   267  		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
   268  	}
   269  	if h == nil || h.count == 0 {
   270  		return
   271  	}
   272  	if h.flags&hashWriting != 0 {
   273  		throw("concurrent map writes")
   274  	}
   275  
   276  	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
   277  
   278  	// Set hashWriting after calling alg.hash for consistency with mapdelete
   279  	h.flags |= hashWriting
   280  
   281  	bucket := hash & bucketMask(h.B)
   282  	if h.growing() {
   283  		growWork_fast64(t, h, bucket)
   284  	}
   285  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
   286  search:
   287  	for ; b != nil; b = b.overflow(t) {
   288  		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
   289  			if key != *(*uint64)(k) || b.tophash[i] == empty {
   290  				continue
   291  			}
   292  			// Only clear key if there are pointers in it.
   293  			if t.key.kind&kindNoPointers == 0 {
   294  				memclrHasPointers(k, t.key.size)
   295  			}
   296  			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
   297  			if t.elem.kind&kindNoPointers == 0 {
   298  				memclrHasPointers(v, t.elem.size)
   299  			} else {
   300  				memclrNoHeapPointers(v, t.elem.size)
   301  			}
   302  			b.tophash[i] = empty
   303  			h.count--
   304  			break search
   305  		}
   306  	}
   307  
   308  	if h.flags&hashWriting == 0 {
   309  		throw("concurrent map writes")
   310  	}
   311  	h.flags &^= hashWriting
   312  }
   313  
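mapdelete_fast64 backs the built-in delete for maps with 64-bit keys: it locates the key, clears the key and value slots (using write-barrier-aware clearing when they contain pointers), and marks the cell empty. A minimal hypothetical example:

	package main

	// Hypothetical demo program; not part of map_fast64.go.
	func main() {
		m := map[uint64]int{5: 50}
		delete(m, 5)    // lowered to a call to runtime.mapdelete_fast64
		println(len(m)) // prints 0
	}
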
   314  func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
   315  	// make sure we evacuate the oldbucket corresponding
   316  	// to the bucket we're about to use
   317  	evacuate_fast64(t, h, bucket&h.oldbucketmask())
   318  
   319  	// evacuate one more oldbucket to make progress on growing
   320  	if h.growing() {
   321  		evacuate_fast64(t, h, h.nevacuate)
   322  	}
   323  }
   324  
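The mask arithmetic in growWork_fast64: when the table doubles from 2^B to 2^(B+1) buckets, new buckets b and b+2^B are both filled from old bucket b, so masking the new bucket index with oldbucketmask() (2^B - 1) recovers the old bucket that must be evacuated first. A standalone sketch with made-up sizes:

	package main

	// Hypothetical illustration of the bucket-mask arithmetic; not part of the runtime.
	func main() {
		oldB := uint(3)                 // suppose the old table had 2^3 = 8 buckets
		oldMask := uintptr(1)<<oldB - 1 // corresponds to h.oldbucketmask() == 7
		newBucket := uintptr(13)        // a bucket index in the doubled, 16-bucket table
		oldBucket := newBucket & oldMask
		println(oldBucket) // prints 5: new bucket 13 is filled from old bucket 5
	}
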
   325  func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
   326  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   327  	newbit := h.noldbuckets()
   328  	if !evacuated(b) {
   329  		// TODO: reuse overflow buckets instead of using new ones, if there
   330  		// is no iterator using the old buckets.  (If !oldIterator.)
   331  
   332  		// xy contains the x and y (low and high) evacuation destinations.
   333  		var xy [2]evacDst
   334  		x := &xy[0]
   335  		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
   336  		x.k = add(unsafe.Pointer(x.b), dataOffset)
   337  		x.v = add(x.k, bucketCnt*8)
   338  
   339  		if !h.sameSizeGrow() {
   340  			// Only calculate y pointers if we're growing bigger.
   341  			// Otherwise GC can see bad pointers.
   342  			y := &xy[1]
   343  			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
   344  			y.k = add(unsafe.Pointer(y.b), dataOffset)
   345  			y.v = add(y.k, bucketCnt*8)
   346  		}
   347  
   348  		for ; b != nil; b = b.overflow(t) {
   349  			k := add(unsafe.Pointer(b), dataOffset)
   350  			v := add(k, bucketCnt*8)
   351  			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
   352  				top := b.tophash[i]
   353  				if top == empty {
   354  					b.tophash[i] = evacuatedEmpty
   355  					continue
   356  				}
   357  				if top < minTopHash {
   358  					throw("bad map state")
   359  				}
   360  				var useY uint8
   361  				if !h.sameSizeGrow() {
   362  					// Compute hash to make our evacuation decision (whether we need
   363  					// to send this key/value to bucket x or bucket y).
   364  					hash := t.key.alg.hash(k, uintptr(h.hash0))
   365  					if hash&newbit != 0 {
   366  						useY = 1
   367  					}
   368  				}
   369  
   370  				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
   371  				dst := &xy[useY]                 // evacuation destination
   372  
   373  				if dst.i == bucketCnt {
   374  					dst.b = h.newoverflow(t, dst.b)
   375  					dst.i = 0
   376  					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
   377  					dst.v = add(dst.k, bucketCnt*8)
   378  				}
   379  				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
   380  
   381  				// Copy key.
   382  				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
   383  					if sys.PtrSize == 8 {
   384  						// Write with a write barrier.
   385  						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
   386  					} else {
   387  						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
   388  						// Give up and call typedmemmove.
   389  						typedmemmove(t.key, dst.k, k)
   390  					}
   391  				} else {
   392  					*(*uint64)(dst.k) = *(*uint64)(k)
   393  				}
   394  
   395  				typedmemmove(t.elem, dst.v, v)
   396  				dst.i++
   397  				// These updates might push these pointers past the end of the
   398  				// key or value arrays.  That's ok, as we have the overflow pointer
   399  				// at the end of the bucket to protect against pointing past the
   400  				// end of the bucket.
   401  				dst.k = add(dst.k, 8)
   402  				dst.v = add(dst.v, uintptr(t.valuesize))
   403  			}
   404  		}
   405  		// Unlink the overflow buckets & clear key/value to help GC.
   406  		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
   407  			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
   408  			// Preserve b.tophash because the evacuation
   409  			// state is maintained there.
   410  			ptr := add(b, dataOffset)
   411  			n := uintptr(t.bucketsize) - dataOffset
   412  			memclrHasPointers(ptr, n)
   413  		}
   414  	}
   415  
   416  	if oldbucket == h.nevacuate {
   417  		advanceEvacuationMark(h, t, newbit)
   418  	}
   419  }
   420  
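The x/y split in evacuate_fast64: during a doubling grow, each entry of an old bucket moves either to destination x (the same bucket index) or destination y (index + newbit), depending on the hash bit that the larger bucket mask newly exposes. A hedged, standalone sketch of that decision with made-up values:

	package main

	// Hypothetical illustration of the x/y evacuation decision; not part of the runtime.
	func main() {
		newbit := uintptr(8)  // the old table had 8 buckets, so the new bit is 1<<3
		hash := uintptr(0x2d) // made-up hash for one key; hash&7 == 5, so old bucket 5
		oldbucket := hash & 7
		if hash&newbit != 0 {
			println(oldbucket + newbit) // prints 13: the entry is evacuated to bucket y
		} else {
			println(oldbucket) // the entry would stay in bucket x (index 5)
		}
	}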
