Source file src/runtime/hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

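// This file contains type-specialized fast paths for map lookups. The
// compiler lowers map accesses whose key type is uint32, uint64, or string
// to these routines instead of the generic mapaccess1/mapaccess2,
// specializing the key comparison and, for single-bucket maps, skipping the
// hash entirely. The mapaccess2_* variants are identical to their
// mapaccess1_* counterparts except that they also report whether the key was
// present; likewise the fast64 variants differ from fast32 only in key width.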
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
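	// Scan the bucket and its overflow chain. With a fixed 4-byte key,
	// comparing the key itself costs about as much as checking the tophash
	// byte, so the key is compared first. The empty check below is still
	// required: a never-used slot has zeroed key bytes and would otherwise
	// spuriously match a lookup of key 0.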
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
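				// Equal string headers (same data pointer and length)
				// mean equal contents, so memequal can be skipped.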
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
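		// keymaybe remembers the index of the one slot whose key still might
		// match; bucketCnt serves as the "no candidate yet" sentinel.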
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
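			// Cheap prefilters before a full memequal: keys in this branch
			// are at least 32 bytes long, so reading 4 bytes at either end
			// is always in bounds.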
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
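	// The hashed lookup below is reached either because the map has more
	// than one bucket (h.B > 0) or because two long keys in the one-bucket
	// case both looked plausible and the hash is needed to tell them apart.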
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
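	// The top 8 bits of the hash act as a per-slot filter. Values below
	// minTopHash are reserved to mark slot states such as empty, so real
	// tophash values are offset to stay above them.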
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
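As a point of reference, here is a small, hypothetical example of ordinary Go code that the compiler lowers to the routines above. The choice of runtime call is made by the compiler based on the key type; the variable names and values here are illustrative only.

package main

import "fmt"

func main() {
	m32 := map[uint32]string{1: "one"}
	m64 := map[uint64]string{2: "two"}
	ms := map[string]int{"hello": 3}

	// v := m[k] on a map[uint32]V is lowered to a mapaccess1_fast32 call.
	fmt.Println(m32[1])

	// v, ok := m[k] on a map[uint64]V is lowered to mapaccess2_fast64.
	if v, ok := m64[2]; ok {
		fmt.Println(v)
	}

	// String keys use the faststr variants: mapaccess1_faststr for v := m[k],
	// mapaccess2_faststr for the comma-ok form.
	if n, ok := ms["hello"]; ok {
		fmt.Println(n)
	}
}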
