
Source file src/runtime/lfstack_64bit.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm

package runtime

import "unsafe"

const (
	// addrBits is the number of bits needed to represent a virtual address.
	//
	// See heapAddrBits for a table of address space sizes on
	// various architectures. 48 bits is enough for all
	// architectures except s390x.
	//
	// On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64.
	// We shift the address left 16 to eliminate the sign extended part and make
	// room in the bottom for the count.
	//
	// On s390x, virtual addresses are 64-bit. There's not much we
	// can do about this, so we just hope that the kernel doesn't
	// get to really high addresses and panic if it does.
	addrBits = 48

	// In addition to the 16 bits taken from the top, we can take 3 from the
	// bottom, because node must be pointer-aligned, giving a total of 19 bits
	// of count.
	cntBits = 64 - addrBits + 3
)

func lfstackPack(node *lfnode, cnt uintptr) uint64 {
	return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
}

func lfstackUnpack(val uint64) *lfnode {
	if GOARCH == "amd64" {
		// amd64 systems can place the stack above the VA hole, so we need to sign extend
		// val before unpacking.
		return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
	}
	return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
}
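The bit layout used above can be exercised outside the runtime. The following is a minimal, standalone sketch, not runtime code: packDemo and unpackDemo are hypothetical names, the address is a plain uint64 rather than a real *lfnode, and only the non-amd64 (no sign extension) unpack path is shown.

package main

import "fmt"

const (
	addrBits = 48                // same value as in the file above
	cntBits  = 64 - addrBits + 3 // 19 bits of counter
)

// packDemo mirrors lfstackPack: the 48-bit, 8-byte-aligned address goes in
// the top 48 bits, a 19-bit counter in the bottom bits. The counter's top
// 3 bits overlap only the address's low 3 alignment bits, which are zero.
func packDemo(addr, cnt uint64) uint64 {
	return addr<<(64-addrBits) | cnt&(1<<cntBits-1)
}

// unpackDemo mirrors the non-amd64 branch of lfstackUnpack: shift right past
// the counter, then shift left 3 to restore the zero alignment bits.
func unpackDemo(val uint64) uint64 {
	return val >> cntBits << 3
}

func main() {
	addr := uint64(0x7f1234567f80) // 8-byte aligned, fits in 48 bits
	cnt := uint64(123456)          // fits in 19 bits

	val := packDemo(addr, cnt)
	fmt.Printf("packed:  %#x\n", val)
	fmt.Printf("address: %#x\n", unpackDemo(val))   // 0x7f1234567f80
	fmt.Printf("counter: %d\n", val&(1<<cntBits-1)) // 123456
}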
