
Source file src/runtime/hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

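A note on the miss path above (editorial, not part of the source file): a failed lookup never inserts or allocates; it returns a pointer into the shared zeroVal array, which the caller reads as the value type's zero value. A minimal sketch of the observable behavior:

	// Illustrative only: a miss yields the value type's zero value and
	// leaves the map unchanged.
	package main

	import "fmt"

	func main() {
		m := map[uint32]int{7: 42}

		fmt.Println(m[7])   // 42: hit
		fmt.Println(m[99])  // 0: miss reads the shared zero value
		fmt.Println(len(m)) // 1: the miss did not insert anything
	}
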
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

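These routines are not called directly; the compiler selects them by key type, so that, for example, a map keyed by uint32 has its one-value access form lowered to mapaccess1_fast32 and its two-value form to mapaccess2_fast32. A minimal sketch of user code that exercises both (illustrative only):

	// Illustrative only: both access forms on a uint32-keyed map.
	package main

	import "fmt"

	func main() {
		m := map[uint32]string{1: "one"}

		v := m[1]      // one-value form -> mapaccess1_fast32
		v2, ok := m[2] // two-value form -> mapaccess2_fast32

		fmt.Println(v, v2, ok) // "one", "", false
	}
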
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

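All of these routines share the bucket-selection arithmetic seen above: with h.B bucket bits, the table holds 1<<h.B buckets and m = 1<<h.B - 1 is the index mask; while a growth is in progress the old table has half as many buckets, so the old index uses m>>1, and the old bucket is consulted only if it has not yet been evacuated. A standalone sketch of the index math (hypothetical hash value, not part of this file):

	// Standalone sketch of the bucket masks; 0x7c15a3b1 is an arbitrary
	// stand-in for the output of t.key.alg.hash.
	package main

	import "fmt"

	func main() {
		const B = 5                 // current table: 1<<5 = 32 buckets
		hash := uintptr(0x7c15a3b1) // hypothetical hash value

		m := uintptr(1)<<B - 1    // 31: index mask for the current table
		newIdx := hash & m        // bucket in the current table
		oldIdx := hash & (m >> 1) // bucket in the half-size old table

		fmt.Printf("new bucket %d, old bucket %d\n", newIdx, oldIdx) // 17 and 1
	}
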
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

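The long-key path above rejects candidates cheaply before paying for a full memequal: first length, then pointer identity, then the first and last 4 bytes (safe here because this path only runs when key.len >= 32). A single surviving candidate gets the full comparison after the loop; a second survivor forces the goto dohash fallback. A standalone sketch of the same filtering idea (hypothetical helper, not part of this file):

	// Standalone sketch: cheap filters that can rule out equality before
	// a byte-by-byte comparison is attempted.
	package main

	import "fmt"

	// quickReject reports whether a and b are certainly unequal. A false
	// result means only that a full comparison is still required.
	func quickReject(a, b string) bool {
		if len(a) != len(b) {
			return true
		}
		if len(a) < 4 {
			return false // too short for the 4-byte probes
		}
		if a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:] {
			return true
		}
		return false
	}

	func main() {
		fmt.Println(quickReject("longish-key-aaaa", "longish-key-bbbb")) // true: last 4 bytes differ
		fmt.Println(quickReject("longish-key-aaaa", "longish-key-aaaa")) // false: full compare needed
	}
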
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

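Both string routines compare a cached top byte of the hash before looking at the key itself: the high 8 bits of the hash, bumped past the reserved cell-state markers when necessary (minTopHash is 4 in this version of the runtime, with the values below it reserved for empty and evacuation states). A standalone sketch of that computation, with unsafe.Sizeof standing in for sys.PtrSize (not part of this file):

	// Standalone sketch of the tophash computation used above.
	package main

	import (
		"fmt"
		"unsafe"
	)

	const minTopHash = 4 // values below this are reserved for cell states

	func tophash(hash uintptr) uint8 {
		top := uint8(hash >> (unsafe.Sizeof(hash)*8 - 8))
		if top < minTopHash {
			top += minTopHash
		}
		return top
	}

	func main() {
		shift := unsafe.Sizeof(uintptr(0))*8 - 8
		fmt.Println(tophash(uintptr(0x7c) << shift)) // 124: already >= minTopHash
		fmt.Println(tophash(uintptr(0x02) << shift)) // 6: 2 is reserved, bumped by 4
	}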
