...

Source file src/runtime/hash64.go

Documentation: runtime

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x wasm

package runtime

import "unsafe"

const (
	// Constants for multiplication: four random odd 64-bit numbers.
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)

func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if (GOARCH == "amd64" || GOARCH == "arm64") &&
		GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint64(seed + s*hashkey[0])
tail:
	switch {
	case s == 0:
	case s < 4:
		// 1-3 bytes: combine the first, middle, and last byte
		// (for 1- or 2-byte inputs these may coincide).
		h ^= uint64(*(*byte)(p))
		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
		h ^= uint64(*(*byte)(add(p, s-1))) << 16
		h = rotl_31(h*m1) * m2
	case s <= 8:
		// 4-8 bytes: two possibly overlapping 4-byte reads cover the input.
		h ^= uint64(readUnaligned32(p))
		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
		h = rotl_31(h*m1) * m2
	case s <= 16:
		// 9-16 bytes: two possibly overlapping 8-byte reads.
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	case s <= 32:
		// 17-32 bytes: four possibly overlapping 8-byte reads.
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, 8))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-16))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	default:
		// Longer inputs: mix 32 bytes per iteration into four
		// independent lanes, then fold the lanes together and
		// reenter the switch to hash the remaining <32 bytes.
		v1 := h
		v2 := uint64(seed * hashkey[1])
		v3 := uint64(seed * hashkey[2])
		v4 := uint64(seed * hashkey[3])
		for s >= 32 {
			v1 ^= readUnaligned64(p)
			v1 = rotl_31(v1*m1) * m2
			p = add(p, 8)
			v2 ^= readUnaligned64(p)
			v2 = rotl_31(v2*m2) * m3
			p = add(p, 8)
			v3 ^= readUnaligned64(p)
			v3 = rotl_31(v3*m3) * m4
			p = add(p, 8)
			v4 ^= readUnaligned64(p)
			v4 = rotl_31(v4*m4) * m1
			p = add(p, 8)
			s -= 32
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}

	// Final avalanche so that every input bit can affect both the high
	// and low bits of the result.
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}

func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint64(seed + 4*hashkey[0])
	v := uint64(readUnaligned32(p))
	h ^= v
	h ^= v << 32
	h = rotl_31(h*m1) * m2
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}

func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint64(seed + 8*hashkey[0])
	h ^= uint64(readUnaligned32(p)) | uint64(readUnaligned32(add(p, 4)))<<32
	h = rotl_31(h*m1) * m2
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_31(x uint64) uint64 {
	return (x << 31) | (x >> (64 - 31))
}
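The hand-folded shift pair in rotl_31 exists because, as the TODO above notes, the compiler of this era did not reliably turn the pattern into a rotate instruction after inlining. As a point of comparison only (not something this file uses), ordinary Go code can write the same rotation with math/bits.RotateLeft64, which recent gc compilers typically lower to a single rotate. The small sketch below, with rotl31 as a local copy of the pattern above, just checks that the two forms agree.

	package main

	import (
		"fmt"
		"math/bits"
	)

	// rotl31 is a standalone copy of the shift pattern used by runtime.rotl_31:
	// rotate x left by a constant 31 bits.
	func rotl31(x uint64) uint64 {
		return (x << 31) | (x >> (64 - 31))
	}

	func main() {
		x := uint64(0xdeadbeefcafebabe)
		// math/bits.RotateLeft64 expresses the same rotation directly;
		// both lines print the same value.
		fmt.Printf("%#x\n", rotl31(x))
		fmt.Printf("%#x\n", bits.RotateLeft64(x, 31))
	}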

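Stripped of the runtime plumbing (unsafe pointers, the per-process hashkey array, and the unaligned-read helpers), the shape of memhash is: seed the state with the length, apply a multiply-rotate-multiply step per 8-byte word, and finish with an xorshift/multiply avalanche. The sketch below applies that same shape to a []byte in ordinary Go. It is an illustration under assumptions, not the runtime implementation: key0 is an arbitrary stand-in for the runtime's random hashkey[0], and the tail is handled with a simple byte loop rather than overlapping reads, so its output neither matches nor is meant to match runtime.memhash.

	package main

	import (
		"encoding/binary"
		"fmt"
		"math/bits"
	)

	const (
		// The runtime's multiplication constants, reused here verbatim.
		m1 = 16877499708836156737
		m2 = 2820277070424839065
		m3 = 9497967016996688599
	)

	// key0 stands in for the runtime's per-process random hashkey[0];
	// any odd 64-bit value serves for this illustration.
	const key0 = 0x9e3779b97f4a7c15

	func hashBytes(b []byte, seed uint64) uint64 {
		h := seed + uint64(len(b))*key0
		// Mix one 8-byte little-endian word at a time, as in memhash's loop:
		// xor the word in, multiply, rotate left by 31, multiply again.
		for len(b) >= 8 {
			h ^= binary.LittleEndian.Uint64(b)
			h = bits.RotateLeft64(h*m1, 31) * m2
			b = b[8:]
		}
		// Fold in any trailing bytes one at a time (memhash instead uses
		// overlapping unaligned reads to avoid a byte-at-a-time loop).
		for i, c := range b {
			h ^= uint64(c) << (8 * uint(i))
		}
		h = bits.RotateLeft64(h*m1, 31) * m2
		// The same final avalanche used by memhash.
		h ^= h >> 29
		h *= m3
		h ^= h >> 32
		return h
	}

	func main() {
		fmt.Printf("%#016x\n", hashBytes([]byte("hello, world"), 0))
	}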