Source file src/runtime/map_faststr.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

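// This file contains the string-key fast paths for the built-in map type.
// The compiler rewrites lookups, assignments, and deletes on maps with a
// string key type to call these specialized routines instead of the generic
// versions in map.go.

// mapaccess1_faststr returns a pointer to the element for key ky, or a
// pointer to the zero value for the element type if ky is not in the map.
// It never returns nil.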
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
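		// keymaybe remembers the single slot whose length and first/last
		// 4 bytes all match the query key; bucketCnt means "no candidate yet".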
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
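	// General case: hash the key, select the bucket, and if the map is in
	// the middle of growing, read from the corresponding old bucket when it
	// has not been evacuated yet.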
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

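// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether
// the key was present in the map.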
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

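// mapassign_faststr returns a pointer to the element slot for key s,
// creating the slot if the key is not already present. The caller is
// expected to store the element value through the returned pointer.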
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

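	// insertb/inserti remember the first empty slot seen while scanning the
	// bucket chain, in case the key turns out not to be present yet.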
	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

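// mapdelete_faststr removes the entry for key ky from the map, if present.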
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
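			// The deleted slot was the last non-empty one in its run: walk
			// backwards, converting trailing emptyOne slots to emptyRest so
			// that future lookups can stop scanning early.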
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

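// growWork_faststr does a bit of incremental grow work: it evacuates the
// old bucket corresponding to bucket, plus one more to keep the grow moving.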
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

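// evacuate_faststr moves every entry in old bucket oldbucket (and its
// overflow chain) to its new location: destination x is the same-numbered
// bucket in the new table, destination y is that bucket plus the old table
// size. For same-size grows, only x is used.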
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*goarch.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
