...

Source file src/runtime/map_faststr.go

Documentation: runtime

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			if t.elem.kind&kindNoPointers == 0 {
				memclrHasPointers(v, t.elem.size)
			} else {
				memclrNoHeapPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
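The routines above are the string-key fast paths of the runtime map implementation: for a map whose key type is string, the compiler lowers ordinary map expressions to these entry points rather than the generic mapaccess/mapassign/mapdelete in map.go. As a rough illustration (a sketch only; the exact lowering is chosen by cmd/compile and can differ across Go versions and for unusual value sizes), the following ordinary user program exercises each of the four operations:

package main

import "fmt"

func main() {
	m := make(map[string]int)

	m["alpha"] = 1     // m[k] = v     -> mapassign_faststr
	v := m["alpha"]    // v := m[k]    -> mapaccess1_faststr
	w, ok := m["beta"] // v, ok := m[k] -> mapaccess2_faststr
	delete(m, "alpha") // delete(m, k) -> mapdelete_faststr

	fmt.Println(v, w, ok, len(m)) // 1 0 false 0
}

Growth stays incremental: once mapassign_faststr detects an excessive load factor or too many overflow buckets it calls hashGrow, and later writes and deletes move a couple of old buckets at a time through growWork_faststr and evacuate_faststr.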
