Source file src/runtime/malloc.go


     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Memory allocator.
     6  //
     7  // This was originally based on tcmalloc, but has diverged quite a bit.
     8  // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     9  
    10  // The main allocator works in runs of pages.
    11  // Small allocation sizes (up to and including 32 kB) are
    12  // rounded to one of about 70 size classes, each of which
    13  // has its own free set of objects of exactly that size.
    14  // Any free page of memory can be split into a set of objects
    15  // of one size class, which are then managed using a free bitmap.
    16  //
    17  // The allocator's data structures are:
    18  //
    19  //	fixalloc: a free-list allocator for fixed-size off-heap objects,
    20  //		used to manage storage used by the allocator.
    21  //	mheap: the malloc heap, managed at page (8192-byte) granularity.
    22  //	mspan: a run of in-use pages managed by the mheap.
    23  //	mcentral: collects all spans of a given size class.
    24  //	mcache: a per-P cache of mspans with free space.
    25  //	mstats: allocation statistics.
    26  //
    27  // Allocating a small object proceeds up a hierarchy of caches:
    28  //
    29  //	1. Round the size up to one of the small size classes
    30  //	   and look in the corresponding mspan in this P's mcache.
    31  //	   Scan the mspan's free bitmap to find a free slot.
    32  //	   If there is a free slot, allocate it.
    33  //	   This can all be done without acquiring a lock.
    34  //
    35  //	2. If the mspan has no free slots, obtain a new mspan
    36  //	   from the mcentral's list of mspans of the required size
    37  //	   class that have free space.
    38  //	   Obtaining a whole span amortizes the cost of locking
    39  //	   the mcentral.
    40  //
    41  //	3. If the mcentral's mspan list is empty, obtain a run
    42  //	   of pages from the mheap to use for the mspan.
    43  //
    44  //	4. If the mheap is empty or has no page runs large enough,
    45  //	   allocate a new group of pages (at least 1MB) from the
    46  //	   operating system. Allocating a large run of pages
    47  //	   amortizes the cost of talking to the operating system.
    48  //
    49  // Sweeping an mspan and freeing objects on it proceeds up a similar
    50  // hierarchy:
    51  //
    52  //	1. If the mspan is being swept in response to allocation, it
    53  //	   is returned to the mcache to satisfy the allocation.
    54  //
    55  //	2. Otherwise, if the mspan still has allocated objects in it,
    56  //	   it is placed on the mcentral free list for the mspan's size
    57  //	   class.
    58  //
    59  //	3. Otherwise, if all objects in the mspan are free, the mspan's
    60  //	   pages are returned to the mheap and the mspan is now dead.
    61  //
    62  // Allocating and freeing a large object uses the mheap
    63  // directly, bypassing the mcache and mcentral.
    64  //
    65  // If mspan.needzero is false, then free object slots in the mspan are
    66  // already zeroed. Otherwise if needzero is true, objects are zeroed as
    67  // they are allocated. There are various benefits to delaying zeroing
    68  // this way:
    69  //
    70  //	1. Stack frame allocation can avoid zeroing altogether.
    71  //
    72  //	2. It exhibits better temporal locality, since the program is
    73  //	   probably about to write to the memory.
    74  //
    75  //	3. We don't zero pages that never get reused.
    76  
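// To make the size-class rounding above concrete, here is a small,
// self-contained sketch. The table below is a hypothetical, abbreviated
// stand-in for the real class tables (class_to_size and size_to_class8 in
// sizeclasses.go), and "sort" is assumed to be imported:
//
//	var toyClassSizes = []int{8, 16, 32, 48, 64, 80, 96, 112, 128}
//
//	// roundToClass returns the smallest toy class size >= n,
//	// or -1 if n is larger than every class.
//	func roundToClass(n int) int {
//		i := sort.SearchInts(toyClassSizes, n)
//		if i == len(toyClassSizes) {
//			return -1
//		}
//		return toyClassSizes[i]
//	}
//
// For example, roundToClass(33) == 48: a 33-byte request is served from the
// 48-byte class, and the 15 leftover bytes are internal fragmentation.
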
    77  // Virtual memory layout
    78  //
    79  // The heap consists of a set of arenas, which are 64MB on 64-bit and
    80  // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
    81  // aligned to the arena size.
    82  //
    83  // Each arena has an associated heapArena object that stores the
    84  // metadata for that arena: the heap bitmap for all words in the arena
    85  // and the span map for all pages in the arena. heapArena objects are
    86  // themselves allocated off-heap.
    87  //
    88  // Since arenas are aligned, the address space can be viewed as a
    89  // series of arena frames. The arena map (mheap_.arenas) maps from
    90  // arena frame number to *heapArena, or nil for parts of the address
    91  // space not backed by the Go heap. The arena map is structured as a
    92  // two-level array consisting of an "L1" arena map and many "L2" arena
    93  // maps; however, since arenas are large, on many architectures, the
    94  // arena map consists of a single, large L2 map.
    95  //
    96  // The arena map covers the entire possible address space, allowing
    97  // the Go heap to use any part of the address space. The allocator
    98  // attempts to keep arenas contiguous so that large spans (and hence
    99  // large objects) can cross arenas.
   100  
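// As a concrete sketch of the arena-frame arithmetic described above, using
// the typical linux/amd64 layout (48-bit addresses, 64 MB arenas, an
// arenaBaseOffset of 1<<47, and a single L1 entry, so the L2 index is the
// whole frame number); the names here are illustrative, not the runtime's:
//
//	const (
//		exHeapArenaBytes  = 64 << 20 // 64 MB arenas
//		exArenaBaseOffset = 1 << 47  // amd64 offset for "negative" addresses
//	)
//
//	// exArenaIndex returns the arena frame number that address p falls
//	// into, mirroring what arenaIndex computes below.
//	func exArenaIndex(p uint64) uint64 {
//		return (p + exArenaBaseOffset) / exHeapArenaBytes
//	}
//
// For example, exArenaIndex(0x00c000000000) == 1<<21 + 12288: the offset
// contributes 1<<(47-26) frames and the address itself 0x00c000000000>>26.
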
   101  package runtime
   102  
   103  import (
   104  	"runtime/internal/atomic"
   105  	"runtime/internal/math"
   106  	"runtime/internal/sys"
   107  	"unsafe"
   108  )
   109  
   110  const (
   111  	debugMalloc = false
   112  
   113  	maxTinySize   = _TinySize
   114  	tinySizeClass = _TinySizeClass
   115  	maxSmallSize  = _MaxSmallSize
   116  
   117  	pageShift = _PageShift
   118  	pageSize  = _PageSize
   119  	pageMask  = _PageMask
   120  	// By construction, single page spans of the smallest object class
   121  	// have the most objects per span.
   122  	maxObjsPerSpan = pageSize / 8
   123  
   124  	concurrentSweep = _ConcurrentSweep
   125  
   126  	_PageSize = 1 << _PageShift
   127  	_PageMask = _PageSize - 1
   128  
   129  	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
   130  	_64bit = 1 << (^uintptr(0) >> 63) / 2
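	// (Since << and / have equal precedence and associate left to right,
	// this is (1 << (^uintptr(0) >> 63)) / 2: 2/2 = 1 on 64-bit systems
	// and 1/2 = 0 on 32-bit systems.)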
   131  
   132  	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
   133  	_TinySize      = 16
   134  	_TinySizeClass = int8(2)
   135  
   136  	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
   137  
   138  	// Per-P, per order stack segment cache size.
   139  	_StackCacheSize = 32 * 1024
   140  
   141  	// Number of orders that get caching. Order 0 is FixedStack
   142  	// and each successive order is twice as large.
   143  	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
   144  	// will be allocated directly.
   145  	// Since FixedStack is different on different systems, we
   146  	// must vary NumStackOrders to keep the same maximum cached size.
   147  	//   OS               | FixedStack | NumStackOrders
   148  	//   -----------------+------------+---------------
   149  	//   linux/darwin/bsd | 2KB        | 4
   150  	//   windows/32       | 4KB        | 3
   151  	//   windows/64       | 8KB        | 2
   152  	//   plan9            | 4KB        | 3
   153  	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
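	// (Evaluating the expression: linux/darwin/bsd give 4, windows/64
	// gives 4 - 8/4 = 2, windows/32 gives 4 - 4/4 = 3, and plan9 gives
	// 4 - 1 = 3, matching the table above.)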
   154  
   155  	// heapAddrBits is the number of bits in a heap address. On
   156  	// amd64, addresses are sign-extended beyond heapAddrBits. On
   157  	// other arches, they are zero-extended.
   158  	//
   159  	// On most 64-bit platforms, we limit this to 48 bits based on a
   160  	// combination of hardware and OS limitations.
   161  	//
   162  	// amd64 hardware limits addresses to 48 bits, sign-extended
   163  	// to 64 bits. Addresses where the top 16 bits are not either
   164  	// all 0 or all 1 are "non-canonical" and invalid. Because of
   165  	// these "negative" addresses, we offset addresses by 1<<47
   166  	// (arenaBaseOffset) on amd64 before computing indexes into
   167  	// the heap arenas index. In 2017, amd64 hardware added
   168  	// support for 57 bit addresses; however, currently only Linux
   169  	// supports this extension and the kernel will never choose an
   170  	// address above 1<<47 unless mmap is called with a hint
   171  	// address above 1<<47 (which we never do).
   172  	//
   173  	// arm64 hardware (as of ARMv8) limits user addresses to 48
   174  	// bits, in the range [0, 1<<48).
   175  	//
   176  	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
   177  	// in hardware. On Linux, Go leans on stricter OS limits. Based
   178  	// on Linux's processor.h, the user address space is limited as
   179  	// follows on 64-bit architectures:
   180  	//
   181  	// Architecture  Name              Maximum Value (exclusive)
   182  	// ---------------------------------------------------------------------
   183  	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
   184  	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
   185  	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
   186  	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
   187  	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
   188  	//
   189  	// These limits may increase over time, but are currently at
   190  	// most 48 bits except on s390x. On all architectures, Linux
   191  	// starts placing mmap'd regions at addresses that are
   192  	// significantly below 48 bits, so even if it's possible to
   193  	// exceed Go's 48 bit limit, it's extremely unlikely in
   194  	// practice.
   195  	//
   196  	// On 32-bit platforms, we accept the full 32-bit address
   197  	// space because doing so is cheap.
   198  	// mips32 only has access to the low 2GB of virtual memory, so
   199  	// we further limit it to 31 bits.
   200  	//
   201  	// On darwin/arm64, although 64-bit pointers are presumably
   202  	// available, pointers are truncated to 33 bits. Furthermore,
   203  	// only the top 4 GiB of the address space are actually available
   204  	// to the application, but we allow the whole 33 bits anyway for
   205  	// simplicity.
   206  	// TODO(mknyszek): Consider limiting it to 32 bits and using
   207  	// arenaBaseOffset to offset into the top 4 GiB.
   208  	//
   209  	// WebAssembly currently has a limit of 4GB linear memory.
   210  	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosDarwin*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosDarwin*sys.GoarchArm64
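	// (Evaluating the expression: 48 on typical 64-bit platforms, 33 on
	// darwin/arm64, 32 on wasm and most 32-bit platforms, and 31 on
	// mips/mipsle.)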
   211  
   212  	// maxAlloc is the maximum size of an allocation. On 64-bit,
   213  	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
   214  	// 32-bit, however, this is one less than 1<<32 because the
   215  	// number of bytes in the address space doesn't actually fit
   216  	// in a uintptr.
   217  	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
   218  
   219  	// The number of bits in a heap address, the size of heap
   220  	// arenas, and the L1 and L2 arena map sizes are related by
   221  	//
   222  	//   (1 << addr bits) = arena size * L1 entries * L2 entries
   223  	//
   224  	// Currently, we balance these as follows:
   225  	//
   226  	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
   227  	// --------------  ---------  ----------  ----------  -----------
   228  	//       */64-bit         48        64MB           1    4M (32MB)
   229  	// windows/64-bit         48         4MB          64    1M  (8MB)
   230  	//       */32-bit         32         4MB           1  1024  (4KB)
   231  	//     */mips(le)         31         4MB           1   512  (2KB)
   232  
   233  	// heapArenaBytes is the size of a heap arena. The heap
   234  	// consists of mappings of size heapArenaBytes, aligned to
   235  	// heapArenaBytes. The initial heap mapping is one arena.
   236  	//
   237  	// This is currently 64MB on 64-bit non-Windows and 4MB on
   238  	// 32-bit and on Windows. We use smaller arenas on Windows
   239  	// because all committed memory is charged to the process,
   240  	// even if it's not touched. Hence, for processes with small
   241  	// heaps, the mapped arena space needs to be commensurate.
   242  	// This is particularly important with the race detector,
   243  	// since it significantly amplifies the cost of committed
   244  	// memory.
   245  	heapArenaBytes = 1 << logHeapArenaBytes
   246  
   247  	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
   248  	// prefer using heapArenaBytes where possible (we need the
   249  	// constant to compute some other constants).
   250  	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm
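	// (Evaluating the expression: 26, i.e. 64MB arenas, on 64-bit except
	// Windows and wasm; 22, i.e. 4MB arenas, on Windows, wasm, and all
	// 32-bit platforms.)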
   251  
   252  	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
   253  	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
   254  
   255  	pagesPerArena = heapArenaBytes / pageSize
   256  
   257  	// arenaL1Bits is the number of bits of the arena number
   258  	// covered by the first level arena map.
   259  	//
   260  	// This number should be small, since the first level arena
   261  	// map requires PtrSize*(1<<arenaL1Bits) of space in the
   262  	// binary's BSS. It can be zero, in which case the first level
   263  	// index is effectively unused. There is a performance benefit
   264  	// to this, since the generated code can be more efficient,
   265  // but it comes at the cost of having a large L2 mapping.
   266  	//
   267  	// We use the L1 map on 64-bit Windows because the arena size
   268  	// is small, but the address space is still 48 bits, and
   269  	// there's a high cost to having a large L2.
   270  	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
   271  
   272  	// arenaL2Bits is the number of bits of the arena number
   273  	// covered by the second level arena index.
   274  	//
   275  	// The size of each arena map allocation is proportional to
   276  	// 1<<arenaL2Bits, so it's important that this not be too
   277  	// large. 48 bits leads to 32MB arena index allocations, which
   278  	// is about the practical threshold.
   279  	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
   280  
   281  	// arenaL1Shift is the number of bits to shift an arena frame
   282  	// number by to compute an index into the first level arena map.
   283  	arenaL1Shift = arenaL2Bits
   284  
   285  	// arenaBits is the total bits in a combined arena map index.
   286  	// This is split between the index into the L1 arena map and
   287  	// the L2 arena map.
   288  	arenaBits = arenaL1Bits + arenaL2Bits
   289  
   290  	// arenaBaseOffset is the pointer value that corresponds to
   291  	// index 0 in the heap arena map.
   292  	//
   293  	// On amd64, the address space is 48 bits, sign extended to 64
   294  	// bits. This offset lets us handle "negative" addresses (or
   295  	// high addresses if viewed as unsigned).
   296  	//
   297  // On aix/ppc64, this offset allows keeping heapAddrBits to 48.
   298  // Otherwise, it would have to be 60 in order to handle mmap addresses
   299  // (in the range 0x0a00000000000000 - 0x0afffffffffffff), but in that
   300  // case the memory reserved in (s *pageAlloc).init for chunks
   301  // causes significant slowdowns.
   302  	//
   303  	// On other platforms, the user address space is contiguous
   304  	// and starts at 0, so no offset is necessary.
   305  	arenaBaseOffset = sys.GoarchAmd64*(1<<47) + (^0x0a00000000000000+1)&uintptrMask*sys.GoosAix
   306  
   307  	// Max number of threads to run garbage collection.
   308  	// 2, 3, and 4 are all plausible maximums depending
   309  	// on the hardware details of the machine. The garbage
   310  	// collector scales well to 32 cpus.
   311  	_MaxGcproc = 32
   312  
   313  	// minLegalPointer is the smallest possible legal pointer.
   314  	// This is the smallest possible architectural page size,
   315  	// since we assume that the first page is never mapped.
   316  	//
   317  	// This should agree with minZeroPage in the compiler.
   318  	minLegalPointer uintptr = 4096
   319  )
   320  
   321  // physPageSize is the size in bytes of the OS's physical pages.
   322  // Mapping and unmapping operations must be done at multiples of
   323  // physPageSize.
   324  //
   325  // This must be set by the OS init code (typically in osinit) before
   326  // mallocinit.
   327  var physPageSize uintptr
   328  
   329  // physHugePageSize is the size in bytes of the OS's default physical huge
   330  // page size whose allocation is opaque to the application. It is assumed
   331  // and verified to be a power of two.
   332  //
   333  // If set, this must be set by the OS init code (typically in osinit) before
   334  // mallocinit. However, setting it at all is optional, and leaving the default
   335  // value is always safe (though potentially less efficient).
   336  //
   337  // Since physHugePageSize is always assumed to be a power of two,
   338  // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
   339  // The purpose of physHugePageShift is to avoid doing divisions in
   340  // performance critical functions.
   341  var (
   342  	physHugePageSize  uintptr
   343  	physHugePageShift uint
   344  )
   345  
   346  // OS memory management abstraction layer
   347  //
   348  // Regions of the address space managed by the runtime may be in one of four
   349  // states at any given time:
   350  // 1) None - Unreserved and unmapped, the default state of any region.
   351  // 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
   352  //               Does not count against the process' memory footprint.
   353  // 3) Prepared - Reserved, intended not to be backed by physical memory (though
   354  //               an OS may implement this lazily). Can transition efficiently to
   355  //               Ready. Accessing memory in such a region is undefined (may
   356  //               fault, may give back unexpected zeroes, etc.).
   357  // 4) Ready - may be accessed safely.
   358  //
   359  // This set of states is more than is strictly necessary to support all the
   360  // currently supported platforms. One could get by with just None, Reserved, and
   361  // Ready. However, the Prepared state gives us flexibility for performance
   362  // purposes. For example, on POSIX-y operating systems, Reserved is usually a
   363  // private anonymous mmap'd region with PROT_NONE set, and to transition
   364  // to Ready would require setting PROT_READ|PROT_WRITE. However the
   365  // underspecification of Prepared lets us use just MADV_FREE to transition from
   366  // Ready to Prepared. Thus with the Prepared state we can set the permission
   367  // bits just once early on and then efficiently tell the OS that it's free to
   368  // take pages away from us when we don't strictly need them.
   369  //
   370  // For each OS there is a common set of helpers defined that transition
   371  // memory regions between these states. The helpers are as follows:
   372  //
   373  // sysAlloc transitions an OS-chosen region of memory from None to Ready.
   374  // More specifically, it obtains a large chunk of zeroed memory from the
   375  // operating system, typically on the order of a hundred kilobytes
   376  // or a megabyte. This memory is always immediately available for use.
   377  //
   378  // sysFree transitions a memory region from any state to None. Therefore, it
   379  // returns memory unconditionally. It is used if an out-of-memory error has been
   380  // detected midway through an allocation or to carve out an aligned section of
   381  // the address space. sysFree may be a no-op only if sysReserve always
   382  // returns a memory region aligned to the heap allocator's alignment
   383  // restrictions.
   384  //
   385  // sysReserve transitions a memory region from None to Reserved. It reserves
   386  // address space in such a way that it would cause a fatal fault upon access
   387  // (either via permissions or not committing the memory). Such a reservation is
   388  // thus never backed by physical memory.
   389  // If the pointer passed to it is non-nil, the caller wants the
   390  // reservation there, but sysReserve can still choose another
   391  // location if that one is unavailable.
   392  // NOTE: sysReserve returns OS-aligned memory, but the heap allocator
   393  // may use larger alignment, so the caller must be careful to realign the
   394  // memory obtained by sysReserve.
   395  //
   396  // sysMap transitions a memory region from Reserved to Prepared. It ensures the
   397  // memory region can be efficiently transitioned to Ready.
   398  //
   399  // sysUsed transitions a memory region from Prepared to Ready. It notifies the
   400  // operating system that the memory region is needed and ensures that the region
   401  // may be safely accessed. This is typically a no-op on systems that don't have
   402  // an explicit commit step and hard over-commit limits, but is critical on
   403  // Windows, for example.
   404  //
   405  // sysUnused transitions a memory region from Ready to Prepared. It notifies the
   406  // operating system that the physical pages backing this memory region are no
   407  // longer needed and can be reused for other purposes. The contents of a
   408  // sysUnused memory region are considered forfeit and the region must not be
   409  // accessed again until sysUsed is called.
   410  //
   411  // sysFault transitions a memory region from Ready or Prepared to Reserved. It
   412  // marks a region such that it will always fault if accessed. Used only for
   413  // debugging the runtime.
   414  
   415  func mallocinit() {
   416  	if class_to_size[_TinySizeClass] != _TinySize {
   417  		throw("bad TinySizeClass")
   418  	}
   419  
   420  	testdefersizes()
   421  
   422  	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
   423  		// heapBits expects modular arithmetic on bitmap
   424  		// addresses to work.
   425  		throw("heapArenaBitmapBytes not a power of 2")
   426  	}
   427  
   428  	// Copy class sizes out for statistics table.
   429  	for i := range class_to_size {
   430  		memstats.by_size[i].size = uint32(class_to_size[i])
   431  	}
   432  
   433  	// Check physPageSize.
   434  	if physPageSize == 0 {
   435  		// The OS init code failed to fetch the physical page size.
   436  		throw("failed to get system page size")
   437  	}
   438  	if physPageSize > maxPhysPageSize {
   439  		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
   440  		throw("bad system page size")
   441  	}
   442  	if physPageSize < minPhysPageSize {
   443  		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
   444  		throw("bad system page size")
   445  	}
   446  	if physPageSize&(physPageSize-1) != 0 {
   447  		print("system page size (", physPageSize, ") must be a power of 2\n")
   448  		throw("bad system page size")
   449  	}
   450  	if physHugePageSize&(physHugePageSize-1) != 0 {
   451  		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
   452  		throw("bad system huge page size")
   453  	}
   454  	if physHugePageSize > maxPhysHugePageSize {
   455  		// physHugePageSize is greater than the maximum supported huge page size.
   456  		// Don't throw here, like in the other cases, since a system configured
   457  		// in this way isn't wrong, we just don't have the code to support them.
   458  		// Instead, silently set the huge page size to zero.
   459  		physHugePageSize = 0
   460  	}
   461  	if physHugePageSize != 0 {
   462  		// Since physHugePageSize is a power of 2, it suffices to increase
   463  		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
   464  		for 1<<physHugePageShift != physHugePageSize {
   465  			physHugePageShift++
   466  		}
   467  	}
   468  
   469  	// Initialize the heap.
   470  	mheap_.init()
   471  	_g_ := getg()
   472  	_g_.m.mcache = allocmcache()
   473  
   474  	// Create initial arena growth hints.
   475  	if sys.PtrSize == 8 {
   476  		// On a 64-bit machine, we pick the following hints
   477  		// because:
   478  		//
   479  		// 1. Starting from the middle of the address space
   480  		// makes it easier to grow out a contiguous range
   481  		// without running into some other mapping.
   482  		//
   483  		// 2. This makes Go heap addresses more easily
   484  		// recognizable when debugging.
   485  		//
   486  		// 3. Stack scanning in gccgo is still conservative,
   487  		// so it's important that addresses be distinguishable
   488  		// from other data.
   489  		//
   490  		// Starting at 0x00c0 means that the valid memory addresses
   491  		// will begin with 0x00c0, 0x00c1, ...
   492  		// In little-endian, that's c0 00, c1 00, ... None of those are valid
   493  		// UTF-8 sequences, and they are otherwise as far away from
   494  		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
   495  		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
   496  		// on OS X during thread allocations.  0x00c0 causes conflicts with
   497  		// AddressSanitizer which reserves all memory up to 0x0100.
   498  		// These choices reduce the odds of a conservative garbage collector
   499  		// not collecting memory because some non-pointer block of memory
   500  		// had a bit pattern that matched a memory address.
   501  		//
   502  		// However, on arm64, we ignore all this advice above and slam the
   503  		// allocation at 0x40 << 32 because when using 4k pages with 3-level
   504  		// translation buffers, the user address space is limited to 39 bits.
   505  		// On darwin/arm64, the address space is even smaller.
   506  		//
   507  		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
   508  		// processes.
   509  		for i := 0x7f; i >= 0; i-- {
   510  			var p uintptr
   511  			switch {
   512  			case GOARCH == "arm64" && GOOS == "darwin":
   513  				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
   514  			case GOARCH == "arm64":
   515  				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
   516  			case GOOS == "aix":
   517  				if i == 0 {
   518  					// We don't use addresses directly after 0x0A00000000000000
   519  					// to avoid collisions with other mmaps done by non-Go programs.
   520  					continue
   521  				}
   522  				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
   523  			case raceenabled:
   524  				// The TSAN runtime requires the heap
   525  				// to be in the range [0x00c000000000,
   526  				// 0x00e000000000).
   527  				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
   528  				if p >= uintptrMask&0x00e000000000 {
   529  					continue
   530  				}
   531  			default:
   532  				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
   533  			}
   534  			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   535  			hint.addr = p
   536  			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   537  		}
   538  	} else {
   539  		// On a 32-bit machine, we're much more concerned
   540  		// about keeping the usable heap contiguous.
   541  		// Hence:
   542  		//
   543  		// 1. We reserve space for all heapArenas up front so
   544  		// they don't get interleaved with the heap. They're
   545  		// ~258MB, so this isn't too bad. (We could reserve a
   546  		// smaller amount of space up front if this is a
   547  		// problem.)
   548  		//
   549  		// 2. We hint the heap to start right above the end of
   550  		// the binary so we have the best chance of keeping it
   551  		// contiguous.
   552  		//
   553  		// 3. We try to stake out a reasonably large initial
   554  		// heap reservation.
   555  
   556  		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
   557  		meta := uintptr(sysReserve(nil, arenaMetaSize))
   558  		if meta != 0 {
   559  			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
   560  		}
   561  
   562  		// We want to start the arena low, but if we're linked
   563  		// against C code, it's possible global constructors
   564  		// have called malloc and adjusted the process' brk.
   565  		// Query the brk so we can avoid trying to map the
   566  		// region over it (which will cause the kernel to put
   567  		// the region somewhere else, likely at a high
   568  		// address).
   569  		procBrk := sbrk0()
   570  
   571  		// If we ask for the end of the data segment but the
   572  		// operating system requires a little more space
   573  		// before we can start allocating, it will give out a
   574  		// slightly higher pointer. Except QEMU, which is
   575  		// buggy, as usual: it won't adjust the pointer
   576  		// upward. So adjust it upward a little bit ourselves:
   577  		// 1/4 MB to get away from the running binary image.
   578  		p := firstmoduledata.end
   579  		if p < procBrk {
   580  			p = procBrk
   581  		}
   582  		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
   583  			p = mheap_.heapArenaAlloc.end
   584  		}
   585  		p = alignUp(p+(256<<10), heapArenaBytes)
   586  		// Because we're worried about fragmentation on
   587  		// 32-bit, we try to make a large initial reservation.
   588  		arenaSizes := []uintptr{
   589  			512 << 20,
   590  			256 << 20,
   591  			128 << 20,
   592  		}
   593  		for _, arenaSize := range arenaSizes {
   594  			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
   595  			if a != nil {
   596  				mheap_.arena.init(uintptr(a), size)
   597  				p = uintptr(a) + size // For hint below
   598  				break
   599  			}
   600  		}
   601  		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   602  		hint.addr = p
   603  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   604  	}
   605  }
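
// For reference, on a typical 64-bit platform the default case in mallocinit
// above produces hint addresses of the form i<<40 | 0x00c0<<32 for i from
// 0x7f down to 0, i.e. 0x7fc000000000, 0x7ec000000000, ..., 0x00c000000000.
// A tiny standalone sketch that prints the first few (assumes "fmt" is
// imported):
//
//	for i := 0x7f; i >= 0x7c; i-- {
//		p := uint64(i)<<40 | 0x00c0<<32
//		fmt.Printf("%#x\n", p) // 0x7fc000000000, 0x7ec000000000, ...
//	}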
   606  
   607  // sysAlloc allocates heap arena space for at least n bytes. The
   608  // returned pointer is always heapArenaBytes-aligned and backed by
   609  // h.arenas metadata. The returned size is always a multiple of
   610  // heapArenaBytes. sysAlloc returns nil on failure.
   611  // There is no corresponding free function.
   612  //
   613  // sysAlloc returns a memory region in the Prepared state. This region must
   614  // be transitioned to Ready before use.
   615  //
   616  // h must be locked.
   617  func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
   618  	n = alignUp(n, heapArenaBytes)
   619  
   620  	// First, try the arena pre-reservation.
   621  	v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
   622  	if v != nil {
   623  		size = n
   624  		goto mapped
   625  	}
   626  
   627  	// Try to grow the heap at a hint address.
   628  	for h.arenaHints != nil {
   629  		hint := h.arenaHints
   630  		p := hint.addr
   631  		if hint.down {
   632  			p -= n
   633  		}
   634  		if p+n < p {
   635  			// We can't use this, so don't ask.
   636  			v = nil
   637  		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
   638  			// Outside addressable heap. Can't use.
   639  			v = nil
   640  		} else {
   641  			v = sysReserve(unsafe.Pointer(p), n)
   642  		}
   643  		if p == uintptr(v) {
   644  			// Success. Update the hint.
   645  			if !hint.down {
   646  				p += n
   647  			}
   648  			hint.addr = p
   649  			size = n
   650  			break
   651  		}
   652  		// Failed. Discard this hint and try the next.
   653  		//
   654  		// TODO: This would be cleaner if sysReserve could be
   655  		// told to only return the requested address. In
   656  		// particular, this is already how Windows behaves, so
   657  		// it would simplify things there.
   658  		if v != nil {
   659  			sysFree(v, n, nil)
   660  		}
   661  		h.arenaHints = hint.next
   662  		h.arenaHintAlloc.free(unsafe.Pointer(hint))
   663  	}
   664  
   665  	if size == 0 {
   666  		if raceenabled {
   667  			// The race detector assumes the heap lives in
   668  			// [0x00c000000000, 0x00e000000000), but we
   669  			// just ran out of hints in this region. Give
   670  			// a nice failure.
   671  			throw("too many address space collisions for -race mode")
   672  		}
   673  
   674  		// All of the hints failed, so we'll take any
   675  		// (sufficiently aligned) address the kernel will give
   676  		// us.
   677  		v, size = sysReserveAligned(nil, n, heapArenaBytes)
   678  		if v == nil {
   679  			return nil, 0
   680  		}
   681  
   682  		// Create new hints for extending this region.
   683  		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
   684  		hint.addr, hint.down = uintptr(v), true
   685  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   686  		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
   687  		hint.addr = uintptr(v) + size
   688  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   689  	}
   690  
   691  	// Check for bad pointers or pointers we can't use.
   692  	{
   693  		var bad string
   694  		p := uintptr(v)
   695  		if p+size < p {
   696  			bad = "region exceeds uintptr range"
   697  		} else if arenaIndex(p) >= 1<<arenaBits {
   698  			bad = "base outside usable address space"
   699  		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
   700  			bad = "end outside usable address space"
   701  		}
   702  		if bad != "" {
   703  			// This should be impossible on most architectures,
   704  			// but it would be really confusing to debug.
   705  			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
   706  			throw("memory reservation exceeds address space limit")
   707  		}
   708  	}
   709  
   710  	if uintptr(v)&(heapArenaBytes-1) != 0 {
   711  		throw("misrounded allocation in sysAlloc")
   712  	}
   713  
   714  	// Transition from Reserved to Prepared.
   715  	sysMap(v, size, &memstats.heap_sys)
   716  
   717  mapped:
   718  	// Create arena metadata.
   719  	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
   720  		l2 := h.arenas[ri.l1()]
   721  		if l2 == nil {
   722  			// Allocate an L2 arena map.
   723  			l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
   724  			if l2 == nil {
   725  				throw("out of memory allocating heap arena map")
   726  			}
   727  			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
   728  		}
   729  
   730  		if l2[ri.l2()] != nil {
   731  			throw("arena already initialized")
   732  		}
   733  		var r *heapArena
   734  		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
   735  		if r == nil {
   736  			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
   737  			if r == nil {
   738  				throw("out of memory allocating heap arena metadata")
   739  			}
   740  		}
   741  
   742  		// Add the arena to the arenas list.
   743  		if len(h.allArenas) == cap(h.allArenas) {
   744  			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
   745  			if size == 0 {
   746  				size = physPageSize
   747  			}
   748  			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
   749  			if newArray == nil {
   750  				throw("out of memory allocating allArenas")
   751  			}
   752  			oldSlice := h.allArenas
   753  			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
   754  			copy(h.allArenas, oldSlice)
   755  			// Do not free the old backing array because
   756  			// there may be concurrent readers. Since we
   757  			// double the array each time, this can lead
   758  			// to at most 2x waste.
   759  		}
   760  		h.allArenas = h.allArenas[:len(h.allArenas)+1]
   761  		h.allArenas[len(h.allArenas)-1] = ri
   762  
   763  		// Store atomically just in case an object from the
   764  		// new heap arena becomes visible before the heap lock
   765  		// is released (which shouldn't happen, but there's
   766  		// little downside to this).
   767  		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
   768  	}
   769  
   770  	// Tell the race detector about the new heap memory.
   771  	if raceenabled {
   772  		racemapshadow(v, size)
   773  	}
   774  
   775  	return
   776  }
   777  
   778  // sysReserveAligned is like sysReserve, but the returned pointer is
   779  // aligned to align bytes. It may reserve either n or n+align bytes,
   780  // so it returns the size that was reserved.
   781  func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
   782  	// Since the alignment is rather large in uses of this
   783  	// function, we're not likely to get it by chance, so we ask
   784  	// for a larger region and remove the parts we don't need.
   785  	retries := 0
   786  retry:
   787  	p := uintptr(sysReserve(v, size+align))
   788  	switch {
   789  	case p == 0:
   790  		return nil, 0
   791  	case p&(align-1) == 0:
   792  		// We got lucky and got an aligned region, so we can
   793  		// use the whole thing.
   794  		return unsafe.Pointer(p), size + align
   795  	case GOOS == "windows":
   796  		// On Windows we can't release pieces of a
   797  		// reservation, so we release the whole thing and
   798  		// re-reserve the aligned sub-region. This may race,
   799  		// so we may have to try again.
   800  		sysFree(unsafe.Pointer(p), size+align, nil)
   801  		p = alignUp(p, align)
   802  		p2 := sysReserve(unsafe.Pointer(p), size)
   803  		if p != uintptr(p2) {
   804  			// Must have raced. Try again.
   805  			sysFree(p2, size, nil)
   806  			if retries++; retries == 100 {
   807  				throw("failed to allocate aligned heap memory; too many retries")
   808  			}
   809  			goto retry
   810  		}
   811  		// Success.
   812  		return p2, size
   813  	default:
   814  		// Trim off the unaligned parts.
   815  		pAligned := alignUp(p, align)
   816  		sysFree(unsafe.Pointer(p), pAligned-p, nil)
   817  		end := pAligned + size
   818  		endLen := (p + size + align) - end
   819  		if endLen > 0 {
   820  			sysFree(unsafe.Pointer(end), endLen, nil)
   821  		}
   822  		return unsafe.Pointer(pAligned), size
   823  	}
   824  }
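
// A worked example of the trimming arithmetic in the default case above,
// assuming size = 256 MB and align = 64 MB (heapArenaBytes on 64-bit) and a
// hypothetical return address from sysReserve:
//
//	p        = 0x7f2345670000            // from sysReserve(v, size+align)
//	pAligned = alignUp(p, 64<<20)        // = 0x7f2348000000
//	sysFree(p, pAligned-p)               // frees the 0x2990000-byte prefix
//	end      = pAligned + size           // keep [pAligned, end)
//	endLen   = (p + size + align) - end  // frees the remainder at the end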
   825  
   826  // base address for all 0-byte allocations
   827  var zerobase uintptr
   828  
   829  // nextFreeFast returns the next free object if one is quickly available.
   830  // Otherwise it returns 0.
   831  func nextFreeFast(s *mspan) gclinkptr {
   832  	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
   833  	if theBit < 64 {
   834  		result := s.freeindex + uintptr(theBit)
   835  		if result < s.nelems {
   836  			freeidx := result + 1
   837  			if freeidx%64 == 0 && freeidx != s.nelems {
   838  				return 0
   839  			}
   840  			s.allocCache >>= uint(theBit + 1)
   841  			s.freeindex = freeidx
   842  			s.allocCount++
   843  			return gclinkptr(result*s.elemsize + s.base())
   844  		}
   845  	}
   846  	return 0
   847  }
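
// The allocCache scan above can be sketched in isolation using math/bits;
// sys.Ctz64 is the runtime-internal equivalent of bits.TrailingZeros64, and a
// set bit in the cache means the corresponding slot is free:
//
//	// nextSetBit returns the index of the lowest set bit in cache, or 64
//	// if cache is zero, mirroring how nextFreeFast consults allocCache.
//	func nextSetBit(cache uint64) int {
//		return bits.TrailingZeros64(cache)
//	}
//
// For example, nextSetBit(0b10100) == 2, so the object at freeindex+2 is the
// next free slot, and the cache is then shifted right by 3 to move past it.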
   848  
   849  // nextFree returns the next free object from the cached span if one is available.
   850  // Otherwise it refills the cache with a span with an available object and
   851  // returns that object along with a flag indicating that this was a heavyweight
   852  // allocation. If it is a heavyweight allocation, the caller must determine
   853  // whether a new GC cycle needs to be started or, if the GC is active,
   854  // whether this goroutine needs to assist the GC.
   855  //
   856  // Must run in a non-preemptible context since otherwise the owner of
   857  // c could change.
   858  func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
   859  	s = c.alloc[spc]
   860  	shouldhelpgc = false
   861  	freeIndex := s.nextFreeIndex()
   862  	if freeIndex == s.nelems {
   863  		// The span is full.
   864  		if uintptr(s.allocCount) != s.nelems {
   865  			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   866  			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
   867  		}
   868  		c.refill(spc)
   869  		shouldhelpgc = true
   870  		s = c.alloc[spc]
   871  
   872  		freeIndex = s.nextFreeIndex()
   873  	}
   874  
   875  	if freeIndex >= s.nelems {
   876  		throw("freeIndex is not valid")
   877  	}
   878  
   879  	v = gclinkptr(freeIndex*s.elemsize + s.base())
   880  	s.allocCount++
   881  	if uintptr(s.allocCount) > s.nelems {
   882  		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   883  		throw("s.allocCount > s.nelems")
   884  	}
   885  	return
   886  }
   887  
   888  // Allocate an object of size bytes.
   889  // Small objects are allocated from the per-P cache's free lists.
   890  // Large objects (> 32 kB) are allocated straight from the heap.
   891  func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
   892  	if gcphase == _GCmarktermination {
   893  		throw("mallocgc called with gcphase == _GCmarktermination")
   894  	}
   895  
   896  	if size == 0 {
   897  		return unsafe.Pointer(&zerobase)
   898  	}
   899  
   900  	if debug.sbrk != 0 {
   901  		align := uintptr(16)
   902  		if typ != nil {
   903  			// TODO(austin): This should be just
   904  			//   align = uintptr(typ.align)
   905  			// but that's only 4 on 32-bit platforms,
   906  			// even if there's a uint64 field in typ (see #599).
   907  			// This causes 64-bit atomic accesses to panic.
   908  			// Hence, we use stricter alignment that matches
   909  			// the normal allocator better.
   910  			if size&7 == 0 {
   911  				align = 8
   912  			} else if size&3 == 0 {
   913  				align = 4
   914  			} else if size&1 == 0 {
   915  				align = 2
   916  			} else {
   917  				align = 1
   918  			}
   919  		}
   920  		return persistentalloc(size, align, &memstats.other_sys)
   921  	}
   922  
   923  	// assistG is the G to charge for this allocation, or nil if
   924  	// GC is not currently active.
   925  	var assistG *g
   926  	if gcBlackenEnabled != 0 {
   927  		// Charge the current user G for this allocation.
   928  		assistG = getg()
   929  		if assistG.m.curg != nil {
   930  			assistG = assistG.m.curg
   931  		}
   932  		// Charge the allocation against the G. We'll account
   933  		// for internal fragmentation at the end of mallocgc.
   934  		assistG.gcAssistBytes -= int64(size)
   935  
   936  		if assistG.gcAssistBytes < 0 {
   937  			// This G is in debt. Assist the GC to correct
   938  			// this before allocating. This must happen
   939  			// before disabling preemption.
   940  			gcAssistAlloc(assistG)
   941  		}
   942  	}
   943  
   944  	// Set mp.mallocing to keep from being preempted by GC.
   945  	mp := acquirem()
   946  	if mp.mallocing != 0 {
   947  		throw("malloc deadlock")
   948  	}
   949  	if mp.gsignal == getg() {
   950  		throw("malloc during signal")
   951  	}
   952  	mp.mallocing = 1
   953  
   954  	shouldhelpgc := false
   955  	dataSize := size
   956  	c := gomcache()
   957  	var x unsafe.Pointer
   958  	noscan := typ == nil || typ.ptrdata == 0
   959  	if size <= maxSmallSize {
   960  		if noscan && size < maxTinySize {
   961  			// Tiny allocator.
   962  			//
   963  			// Tiny allocator combines several tiny allocation requests
   964  			// into a single memory block. The resulting memory block
   965  			// is freed when all subobjects are unreachable. The subobjects
   966  			// must be noscan (don't have pointers), this ensures that
   967  			// the amount of potentially wasted memory is bounded.
   968  			//
   969  			// Size of the memory block used for combining (maxTinySize) is tunable.
   970  			// The current setting is 16 bytes, which corresponds to 2x worst case
   971  			// memory wastage (when all but one subobject are unreachable).
   972  			// 8 bytes would result in no wastage at all, but provides less
   973  			// opportunities for combining.
   974  			// 32 bytes provides more opportunities for combining,
   975  			// but can lead to 4x worst case wastage.
   976  			// The best case saving is 8x regardless of block size.
   977  			//
   978  			// Objects obtained from tiny allocator must not be freed explicitly.
   979  			// So when an object will be freed explicitly, we ensure that
   980  			// its size >= maxTinySize.
   981  			//
   982  			// SetFinalizer has a special case for objects potentially coming
   983  			// from the tiny allocator; in that case it allows setting finalizers
   984  			// for an inner byte of a memory block.
   985  			//
   986  			// The main targets of tiny allocator are small strings and
   987  			// standalone escaping variables. On a json benchmark
   988  			// the allocator reduces the number of allocations by ~12% and
   989  			// the heap size by ~20%.
   990  			off := c.tinyoffset
   991  			// Align tiny pointer for required (conservative) alignment.
   992  			if size&7 == 0 {
   993  				off = alignUp(off, 8)
   994  			} else if size&3 == 0 {
   995  				off = alignUp(off, 4)
   996  			} else if size&1 == 0 {
   997  				off = alignUp(off, 2)
   998  			}
   999  			if off+size <= maxTinySize && c.tiny != 0 {
  1000  				// The object fits into existing tiny block.
  1001  				x = unsafe.Pointer(c.tiny + off)
  1002  				c.tinyoffset = off + size
  1003  				c.local_tinyallocs++
  1004  				mp.mallocing = 0
  1005  				releasem(mp)
  1006  				return x
  1007  			}
  1008  			// Allocate a new maxTinySize block.
  1009  			span := c.alloc[tinySpanClass]
  1010  			v := nextFreeFast(span)
  1011  			if v == 0 {
  1012  				v, _, shouldhelpgc = c.nextFree(tinySpanClass)
  1013  			}
  1014  			x = unsafe.Pointer(v)
  1015  			(*[2]uint64)(x)[0] = 0
  1016  			(*[2]uint64)(x)[1] = 0
  1017  			// See if we need to replace the existing tiny block with the new one
  1018  			// based on the amount of remaining free space.
  1019  			if size < c.tinyoffset || c.tiny == 0 {
  1020  				c.tiny = uintptr(x)
  1021  				c.tinyoffset = size
  1022  			}
  1023  			size = maxTinySize
  1024  		} else {
  1025  			var sizeclass uint8
  1026  			if size <= smallSizeMax-8 {
  1027  				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
  1028  			} else {
  1029  				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
  1030  			}
  1031  			size = uintptr(class_to_size[sizeclass])
  1032  			spc := makeSpanClass(sizeclass, noscan)
  1033  			span := c.alloc[spc]
  1034  			v := nextFreeFast(span)
  1035  			if v == 0 {
  1036  				v, span, shouldhelpgc = c.nextFree(spc)
  1037  			}
  1038  			x = unsafe.Pointer(v)
  1039  			if needzero && span.needzero != 0 {
  1040  				memclrNoHeapPointers(unsafe.Pointer(v), size)
  1041  			}
  1042  		}
  1043  	} else {
  1044  		var s *mspan
  1045  		shouldhelpgc = true
  1046  		systemstack(func() {
  1047  			s = largeAlloc(size, needzero, noscan)
  1048  		})
  1049  		s.freeindex = 1
  1050  		s.allocCount = 1
  1051  		x = unsafe.Pointer(s.base())
  1052  		size = s.elemsize
  1053  	}
  1054  
  1055  	var scanSize uintptr
  1056  	if !noscan {
  1057  		// If allocating a defer+arg block, now that we've picked a malloc size
  1058  		// large enough to hold everything, cut the "asked for" size down to
  1059  		// just the defer header, so that the GC bitmap will record the arg block
  1060  		// as containing nothing at all (as if it were unused space at the end of
  1061  		// a malloc block caused by size rounding).
  1062  		// The defer arg areas are scanned as part of scanstack.
  1063  		if typ == deferType {
  1064  			dataSize = unsafe.Sizeof(_defer{})
  1065  		}
  1066  		heapBitsSetType(uintptr(x), size, dataSize, typ)
  1067  		if dataSize > typ.size {
  1068  			// Array allocation. If there are any
  1069  			// pointers, GC has to scan to the last
  1070  			// element.
  1071  			if typ.ptrdata != 0 {
  1072  				scanSize = dataSize - typ.size + typ.ptrdata
  1073  			}
  1074  		} else {
  1075  			scanSize = typ.ptrdata
  1076  		}
  1077  		c.local_scan += scanSize
  1078  	}
  1079  
  1080  	// Ensure that the stores above that initialize x to
  1081  	// type-safe memory and set the heap bits occur before
  1082  	// the caller can make x observable to the garbage
  1083  	// collector. Otherwise, on weakly ordered machines,
  1084  	// the garbage collector could follow a pointer to x,
  1085  	// but see uninitialized memory or stale heap bits.
  1086  	publicationBarrier()
  1087  
  1088  	// Allocate black during GC.
  1089  	// All slots hold nil so no scanning is needed.
  1090  	// This may be racing with GC so do it atomically if there can be
  1091  	// a race marking the bit.
  1092  	if gcphase != _GCoff {
  1093  		gcmarknewobject(uintptr(x), size, scanSize)
  1094  	}
  1095  
  1096  	if raceenabled {
  1097  		racemalloc(x, size)
  1098  	}
  1099  
  1100  	if msanenabled {
  1101  		msanmalloc(x, size)
  1102  	}
  1103  
  1104  	mp.mallocing = 0
  1105  	releasem(mp)
  1106  
  1107  	if debug.allocfreetrace != 0 {
  1108  		tracealloc(x, size, typ)
  1109  	}
  1110  
  1111  	if rate := MemProfileRate; rate > 0 {
  1112  		if rate != 1 && size < c.next_sample {
  1113  			c.next_sample -= size
  1114  		} else {
  1115  			mp := acquirem()
  1116  			profilealloc(mp, x, size)
  1117  			releasem(mp)
  1118  		}
  1119  	}
  1120  
  1121  	if assistG != nil {
  1122  		// Account for internal fragmentation in the assist
  1123  		// debt now that we know it.
  1124  		assistG.gcAssistBytes -= int64(size - dataSize)
  1125  	}
  1126  
  1127  	if shouldhelpgc {
  1128  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1129  			gcStart(t)
  1130  		}
  1131  	}
  1132  
  1133  	return x
  1134  }
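
// The tiny-allocator bookkeeping in mallocgc above can be sketched in
// isolation. The constant and function below are illustrative only; they
// mirror the mcache's tiny/tinyoffset fields and the 16-byte block size but
// are not the runtime's own code (alignUp rounds its first argument up to a
// multiple of the second, as elsewhere in this file):
//
//	const toyMaxTinySize = 16
//
//	// tinyFit returns the aligned offset an n-byte noscan allocation
//	// would use within the current tiny block, and whether it fits.
//	func tinyFit(off, n uintptr) (uintptr, bool) {
//		switch {
//		case n&7 == 0:
//			off = alignUp(off, 8)
//		case n&3 == 0:
//			off = alignUp(off, 4)
//		case n&1 == 0:
//			off = alignUp(off, 2)
//		}
//		return off, off+n <= toyMaxTinySize
//	}
//
// For example, with off = 4 an 8-byte value is placed at offset 8 and exactly
// fills the block, while a 5-byte value is placed at offset 4 and leaves 7
// bytes for later tiny allocations.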
  1135  
  1136  func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
  1137  	// print("largeAlloc size=", size, "\n")
  1138  
  1139  	if size+_PageSize < size {
  1140  		throw("out of memory")
  1141  	}
  1142  	npages := size >> _PageShift
  1143  	if size&_PageMask != 0 {
  1144  		npages++
  1145  	}
  1146  
  1147  	// Deduct credit for this span allocation and sweep if
  1148  	// necessary. mheap_.alloc will also sweep npages, so this only
  1149  	// pays the debt down to npages pages.
  1150  	deductSweepCredit(npages*_PageSize, npages)
  1151  
  1152  	s := mheap_.alloc(npages, makeSpanClass(0, noscan), needzero)
  1153  	if s == nil {
  1154  		throw("out of memory")
  1155  	}
  1156  	s.limit = s.base() + size
  1157  	heapBitsForAddr(s.base()).initSpan(s)
  1158  	return s
  1159  }
  1160  
  1161  // implementation of new builtin
  1162  // compiler (both frontend and SSA backend) knows the signature
  1163  // of this function
  1164  func newobject(typ *_type) unsafe.Pointer {
  1165  	return mallocgc(typ.size, typ, true)
  1166  }
  1167  
  1168  //go:linkname reflect_unsafe_New reflect.unsafe_New
  1169  func reflect_unsafe_New(typ *_type) unsafe.Pointer {
  1170  	return mallocgc(typ.size, typ, true)
  1171  }
  1172  
  1173  //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
  1174  func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
  1175  	return mallocgc(typ.size, typ, true)
  1176  }
  1177  
  1178  // newarray allocates an array of n elements of type typ.
  1179  func newarray(typ *_type, n int) unsafe.Pointer {
  1180  	if n == 1 {
  1181  		return mallocgc(typ.size, typ, true)
  1182  	}
  1183  	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
  1184  	if overflow || mem > maxAlloc || n < 0 {
  1185  		panic(plainError("runtime: allocation size out of range"))
  1186  	}
  1187  	return mallocgc(mem, typ, true)
  1188  }
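
// newarray relies on math.MulUintptr to detect overflow of typ.size * n. A
// portable standalone sketch of that kind of check (not necessarily the
// runtime's exact implementation):
//
//	// mulOverflows returns a*b (possibly wrapped) and reports whether
//	// the multiplication overflows uintptr.
//	func mulOverflows(a, b uintptr) (uintptr, bool) {
//		const maxUintptr = ^uintptr(0)
//		if a > 1 && b > 1 && a > maxUintptr/b {
//			return a * b, true
//		}
//		return a * b, false
//	}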
  1189  
  1190  //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
  1191  func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
  1192  	return newarray(typ, n)
  1193  }
  1194  
  1195  func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
  1196  	mp.mcache.next_sample = nextSample()
  1197  	mProf_Malloc(x, size)
  1198  }
  1199  
  1200  // nextSample returns the next sampling point for heap profiling. The goal is
  1201  // to sample allocations on average every MemProfileRate bytes, but with a
  1202  // completely random distribution over the allocation timeline; this
  1203  // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
  1204  // processes, the distance between two samples follows the exponential
  1205  // distribution (exp(MemProfileRate)), so the best return value is a random
  1206  // number taken from an exponential distribution whose mean is MemProfileRate.
  1207  func nextSample() uintptr {
  1208  	if GOOS == "plan9" {
  1209  		// Plan 9 doesn't support floating point in note handler.
  1210  		if g := getg(); g == g.m.gsignal {
  1211  			return nextSampleNoFP()
  1212  		}
  1213  	}
  1214  
  1215  	return uintptr(fastexprand(MemProfileRate))
  1216  }
  1217  
  1218  // fastexprand returns a random number from an exponential distribution with
  1219  // the specified mean.
  1220  func fastexprand(mean int) int32 {
  1221  	// Avoid overflow. Maximum possible step is
  1222  	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
  1223  	switch {
  1224  	case mean > 0x7000000:
  1225  		mean = 0x7000000
  1226  	case mean == 0:
  1227  		return 0
  1228  	}
  1229  
  1230  	// Take a random sample of the exponential distribution exp(-mean*x).
  1231  	// The probability distribution function is mean*exp(-mean*x), so the CDF is
  1232  	// p = 1 - exp(-mean*x), so
  1233  	// q = 1 - p == exp(-mean*x)
  1234  	// log_e(q) = -mean*x
  1235  	// -log_e(q)/mean = x
  1236  	// x = -log_e(q) * mean
  1237  	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
  1238  	const randomBitCount = 26
  1239  	q := fastrand()%(1<<randomBitCount) + 1
  1240  	qlog := fastlog2(float64(q)) - randomBitCount
  1241  	if qlog > 0 {
  1242  		qlog = 0
  1243  	}
  1244  	const minusLog2 = -0.6931471805599453 // -ln(2)
  1245  	return int32(qlog*(minusLog2*float64(mean))) + 1
  1246  }
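
// The same distribution can be drawn with ordinary floating point, which may
// be easier to follow than the fixed-point log2 trick above (standalone
// sketch; assumes "math/rand" is imported):
//
//	// nextSampleFloat returns a sampling distance drawn from an
//	// exponential distribution with the given mean, like fastexprand.
//	func nextSampleFloat(mean float64) float64 {
//		return rand.ExpFloat64() * mean // ExpFloat64 has mean 1
//	}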
  1247  
  1248  // nextSampleNoFP is similar to nextSample, but uses older,
  1249  // simpler code to avoid floating point.
  1250  func nextSampleNoFP() uintptr {
  1251  	// Set first allocation sample size.
  1252  	rate := MemProfileRate
  1253  	if rate > 0x3fffffff { // make 2*rate not overflow
  1254  		rate = 0x3fffffff
  1255  	}
  1256  	if rate != 0 {
  1257  		return uintptr(fastrand() % uint32(2*rate))
  1258  	}
  1259  	return 0
  1260  }
  1261  
  1262  type persistentAlloc struct {
  1263  	base *notInHeap
  1264  	off  uintptr
  1265  }
  1266  
  1267  var globalAlloc struct {
  1268  	mutex
  1269  	persistentAlloc
  1270  }
  1271  
  1272  // persistentChunkSize is the number of bytes we allocate when we grow
  1273  // a persistentAlloc.
  1274  const persistentChunkSize = 256 << 10
  1275  
  1276  // persistentChunks is a list of all the persistent chunks we have
  1277  // allocated. The list is maintained through the first word in the
  1278  // persistent chunk. This is updated atomically.
  1279  var persistentChunks *notInHeap
  1280  
  1281  // Wrapper around sysAlloc that can allocate small chunks.
  1282  // There is no associated free operation.
  1283  // Intended for things like function/type/debug-related persistent data.
  1284  // If align is 0, uses default align (currently 8).
  1285  // The returned memory will be zeroed.
  1286  //
  1287  // Consider marking persistentalloc'd types go:notinheap.
  1288  func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
  1289  	var p *notInHeap
  1290  	systemstack(func() {
  1291  		p = persistentalloc1(size, align, sysStat)
  1292  	})
  1293  	return unsafe.Pointer(p)
  1294  }
  1295  
  1296  // Must run on system stack because stack growth can (re)invoke it.
  1297  // See issue 9174.
  1298  //go:systemstack
  1299  func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
  1300  	const (
  1301  		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
  1302  	)
  1303  
  1304  	if size == 0 {
  1305  		throw("persistentalloc: size == 0")
  1306  	}
  1307  	if align != 0 {
  1308  		if align&(align-1) != 0 {
  1309  			throw("persistentalloc: align is not a power of 2")
  1310  		}
  1311  		if align > _PageSize {
  1312  			throw("persistentalloc: align is too large")
  1313  		}
  1314  	} else {
  1315  		align = 8
  1316  	}
  1317  
  1318  	if size >= maxBlock {
  1319  		return (*notInHeap)(sysAlloc(size, sysStat))
  1320  	}
  1321  
  1322  	mp := acquirem()
  1323  	var persistent *persistentAlloc
  1324  	if mp != nil && mp.p != 0 {
  1325  		persistent = &mp.p.ptr().palloc
  1326  	} else {
  1327  		lock(&globalAlloc.mutex)
  1328  		persistent = &globalAlloc.persistentAlloc
  1329  	}
  1330  	persistent.off = alignUp(persistent.off, align)
  1331  	if persistent.off+size > persistentChunkSize || persistent.base == nil {
  1332  		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
  1333  		if persistent.base == nil {
  1334  			if persistent == &globalAlloc.persistentAlloc {
  1335  				unlock(&globalAlloc.mutex)
  1336  			}
  1337  			throw("runtime: cannot allocate memory")
  1338  		}
  1339  
  1340  		// Add the new chunk to the persistentChunks list.
  1341  		for {
  1342  			chunks := uintptr(unsafe.Pointer(persistentChunks))
  1343  			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
  1344  			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
  1345  				break
  1346  			}
  1347  		}
  1348  		persistent.off = alignUp(sys.PtrSize, align)
  1349  	}
  1350  	p := persistent.base.add(persistent.off)
  1351  	persistent.off += size
  1352  	releasem(mp)
  1353  	if persistent == &globalAlloc.persistentAlloc {
  1354  		unlock(&globalAlloc.mutex)
  1355  	}
  1356  
  1357  	if sysStat != &memstats.other_sys {
  1358  		mSysStatInc(sysStat, size)
  1359  		mSysStatDec(&memstats.other_sys, size)
  1360  	}
  1361  	return p
  1362  }
  1363  
  1364  // inPersistentAlloc reports whether p points to memory allocated by
  1365  // persistentalloc. This must be nosplit because it is called by the
  1366  // cgo checker code, which is called by the write barrier code.
  1367  //go:nosplit
  1368  func inPersistentAlloc(p uintptr) bool {
  1369  	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
  1370  	for chunk != 0 {
  1371  		if p >= chunk && p < chunk+persistentChunkSize {
  1372  			return true
  1373  		}
  1374  		chunk = *(*uintptr)(unsafe.Pointer(chunk))
  1375  	}
  1376  	return false
  1377  }
  1378  
  1379  // linearAlloc is a simple linear allocator that pre-reserves a region
  1380  // of memory and then maps that region into the Ready state as needed. The
  1381  // caller is responsible for locking.
  1382  type linearAlloc struct {
  1383  	next   uintptr // next free byte
  1384  	mapped uintptr // one byte past end of mapped space
  1385  	end    uintptr // end of reserved space
  1386  }
  1387  
  1388  func (l *linearAlloc) init(base, size uintptr) {
  1389  	l.next, l.mapped = base, base
  1390  	l.end = base + size
  1391  }
  1392  
  1393  func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
  1394  	p := alignUp(l.next, align)
  1395  	if p+size > l.end {
  1396  		return nil
  1397  	}
  1398  	l.next = p + size
  1399  	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
  1400  		// Transition from Reserved to Prepared to Ready.
  1401  		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
  1402  		sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
  1403  		l.mapped = pEnd
  1404  	}
  1405  	return unsafe.Pointer(p)
  1406  }
  1407  
  1408  // notInHeap is off-heap memory allocated by a lower-level allocator
  1409  // like sysAlloc or persistentAlloc.
  1410  //
  1411  // In general, it's better to use real types marked as go:notinheap,
  1412  // but this serves as a generic type for situations where that isn't
  1413  // possible (like in the allocators).
  1414  //
  1415  // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
  1416  //
  1417  //go:notinheap
  1418  type notInHeap struct{}
  1419  
  1420  func (p *notInHeap) add(bytes uintptr) *notInHeap {
  1421  	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
  1422  }
  1423  
