Source file src/runtime/malloc.go
Documentation: runtime
1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Memory allocator. 6 // 7 // This was originally based on tcmalloc, but has diverged quite a bit. 8 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html 9 10 // The main allocator works in runs of pages. 11 // Small allocation sizes (up to and including 32 kB) are 12 // rounded to one of about 70 size classes, each of which 13 // has its own free set of objects of exactly that size. 14 // Any free page of memory can be split into a set of objects 15 // of one size class, which are then managed using a free bitmap. 16 // 17 // The allocator's data structures are: 18 // 19 // fixalloc: a free-list allocator for fixed-size off-heap objects, 20 // used to manage storage used by the allocator. 21 // mheap: the malloc heap, managed at page (8192-byte) granularity. 22 // mspan: a run of in-use pages managed by the mheap. 23 // mcentral: collects all spans of a given size class. 24 // mcache: a per-P cache of mspans with free space. 25 // mstats: allocation statistics. 26 // 27 // Allocating a small object proceeds up a hierarchy of caches: 28 // 29 // 1. Round the size up to one of the small size classes 30 // and look in the corresponding mspan in this P's mcache. 31 // Scan the mspan's free bitmap to find a free slot. 32 // If there is a free slot, allocate it. 33 // This can all be done without acquiring a lock. 34 // 35 // 2. If the mspan has no free slots, obtain a new mspan 36 // from the mcentral's list of mspans of the required size 37 // class that have free space. 38 // Obtaining a whole span amortizes the cost of locking 39 // the mcentral. 40 // 41 // 3. If the mcentral's mspan list is empty, obtain a run 42 // of pages from the mheap to use for the mspan. 43 // 44 // 4. If the mheap is empty or has no page runs large enough, 45 // allocate a new group of pages (at least 1MB) from the 46 // operating system. Allocating a large run of pages 47 // amortizes the cost of talking to the operating system. 48 // 49 // Sweeping an mspan and freeing objects on it proceeds up a similar 50 // hierarchy: 51 // 52 // 1. If the mspan is being swept in response to allocation, it 53 // is returned to the mcache to satisfy the allocation. 54 // 55 // 2. Otherwise, if the mspan still has allocated objects in it, 56 // it is placed on the mcentral free list for the mspan's size 57 // class. 58 // 59 // 3. Otherwise, if all objects in the mspan are free, the mspan's 60 // pages are returned to the mheap and the mspan is now dead. 61 // 62 // Allocating and freeing a large object uses the mheap 63 // directly, bypassing the mcache and mcentral. 64 // 65 // If mspan.needzero is false, then free object slots in the mspan are 66 // already zeroed. Otherwise if needzero is true, objects are zeroed as 67 // they are allocated. There are various benefits to delaying zeroing 68 // this way: 69 // 70 // 1. Stack frame allocation can avoid zeroing altogether. 71 // 72 // 2. It exhibits better temporal locality, since the program is 73 // probably about to write to the memory. 74 // 75 // 3. We don't zero pages that never get reused. 76 77 // Virtual memory layout 78 // 79 // The heap consists of a set of arenas, which are 64MB on 64-bit and 80 // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also 81 // aligned to the arena size. 
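The free-bitmap scan described in step 1 of the allocation hierarchy above can be pictured with a small, self-contained sketch. Nothing in it is runtime code: toySpan and its fields are illustrative stand-ins for an mspan, loosely mirroring how nextFreeFast consults mspan.allocCache later in this file.

package main

import (
	"fmt"
	"math/bits"
)

// toySpan is a simplified stand-in for an mspan: a run of equally sized
// slots plus a 64-bit cache of the allocation bitmap, where a 1 bit
// means "this slot is free". All names here are illustrative.
type toySpan struct {
	base       uintptr // address of the first slot
	elemSize   uintptr // size of each slot in bytes
	nelems     uintptr // number of slots in the span
	freeIndex  uintptr // slots below this index are known to be allocated
	allocCache uint64  // free/used bits for slots starting at freeIndex
}

// nextFreeFast scans the cached bitmap for the lowest set bit, the same
// trick the runtime uses (sys.Ctz64 there, bits.TrailingZeros64 here).
// It returns the slot address, or 0 if the cache has no free slot.
func (s *toySpan) nextFreeFast() uintptr {
	theBit := bits.TrailingZeros64(s.allocCache)
	if theBit == 64 {
		return 0 // cache exhausted; a real allocator would refill it
	}
	idx := s.freeIndex + uintptr(theBit)
	if idx >= s.nelems {
		return 0
	}
	s.allocCache >>= uint(theBit + 1)
	s.freeIndex = idx + 1
	return s.base + idx*s.elemSize
}

func main() {
	s := &toySpan{base: 0x1000, elemSize: 16, nelems: 64, allocCache: ^uint64(0)}
	for i := 0; i < 3; i++ {
		fmt.Printf("allocated slot at %#x\n", s.nextFreeFast())
	}
}

The real scan also refills allocCache from the span's bitmap every 64 slots and hands off to mcentral when the span is full; the sketch omits both.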
82 // 83 // Each arena has an associated heapArena object that stores the 84 // metadata for that arena: the heap bitmap for all words in the arena 85 // and the span map for all pages in the arena. heapArena objects are 86 // themselves allocated off-heap. 87 // 88 // Since arenas are aligned, the address space can be viewed as a 89 // series of arena frames. The arena map (mheap_.arenas) maps from 90 // arena frame number to *heapArena, or nil for parts of the address 91 // space not backed by the Go heap. The arena map is structured as a 92 // two-level array consisting of a "L1" arena map and many "L2" arena 93 // maps; however, since arenas are large, on many architectures, the 94 // arena map consists of a single, large L2 map. 95 // 96 // The arena map covers the entire possible address space, allowing 97 // the Go heap to use any part of the address space. The allocator 98 // attempts to keep arenas contiguous so that large spans (and hence 99 // large objects) can cross arenas. 100 101 package runtime 102 103 import ( 104 "runtime/internal/atomic" 105 "runtime/internal/math" 106 "runtime/internal/sys" 107 "unsafe" 108 ) 109 110 const ( 111 debugMalloc = false 112 113 maxTinySize = _TinySize 114 tinySizeClass = _TinySizeClass 115 maxSmallSize = _MaxSmallSize 116 117 pageShift = _PageShift 118 pageSize = _PageSize 119 pageMask = _PageMask 120 // By construction, single page spans of the smallest object class 121 // have the most objects per span. 122 maxObjsPerSpan = pageSize / 8 123 124 concurrentSweep = _ConcurrentSweep 125 126 _PageSize = 1 << _PageShift 127 _PageMask = _PageSize - 1 128 129 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems 130 _64bit = 1 << (^uintptr(0) >> 63) / 2 131 132 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. 133 _TinySize = 16 134 _TinySizeClass = int8(2) 135 136 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc 137 138 // Per-P, per order stack segment cache size. 139 _StackCacheSize = 32 * 1024 140 141 // Number of orders that get caching. Order 0 is FixedStack 142 // and each successive order is twice as large. 143 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks 144 // will be allocated directly. 145 // Since FixedStack is different on different systems, we 146 // must vary NumStackOrders to keep the same maximum cached size. 147 // OS | FixedStack | NumStackOrders 148 // -----------------+------------+--------------- 149 // linux/darwin/bsd | 2KB | 4 150 // windows/32 | 4KB | 3 151 // windows/64 | 8KB | 2 152 // plan9 | 4KB | 3 153 _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9 154 155 // heapAddrBits is the number of bits in a heap address. On 156 // amd64, addresses are sign-extended beyond heapAddrBits. On 157 // other arches, they are zero-extended. 158 // 159 // On most 64-bit platforms, we limit this to 48 bits based on a 160 // combination of hardware and OS limitations. 161 // 162 // amd64 hardware limits addresses to 48 bits, sign-extended 163 // to 64 bits. Addresses where the top 16 bits are not either 164 // all 0 or all 1 are "non-canonical" and invalid. Because of 165 // these "negative" addresses, we offset addresses by 1<<47 166 // (arenaBaseOffset) on amd64 before computing indexes into 167 // the heap arenas index. 
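To make the two-level arena map concrete, the sketch below turns an address into an arena frame number and then into L1 and L2 map indices. The constants follow the windows/64-bit row of the table further down in this file (48-bit addresses, 4MB arenas, 6-bit L1); the real runtime also applies arenaBaseOffset on amd64 and aix before indexing, which this sketch leaves out.

package main

import "fmt"

// Illustrative constants only, matching the windows/64-bit configuration
// tabulated later in this file.
const (
	heapAddrBits      = 48
	logHeapArenaBytes = 22 // 4MB arenas
	heapArenaBytes    = 1 << logHeapArenaBytes
	arenaL1Bits       = 6
	arenaL2Bits       = heapAddrBits - logHeapArenaBytes - arenaL1Bits
	arenaL1Shift      = arenaL2Bits
)

// arenaFrame returns the arena frame number for p, i.e. which
// heapArenaBytes-aligned window of the address space p falls in.
// The runtime offsets p by arenaBaseOffset first on some platforms.
func arenaFrame(p uintptr) uintptr {
	return p / heapArenaBytes
}

// splitIndex splits a frame number into the L1 and L2 map indices.
func splitIndex(frame uintptr) (l1, l2 uintptr) {
	return frame >> arenaL1Shift, frame & (1<<arenaL2Bits - 1)
}

func main() {
	p := uintptr(0x00c000080000) // a typical Go heap address
	frame := arenaFrame(p)
	l1, l2 := splitIndex(frame)
	fmt.Printf("addr %#x -> frame %d (L1 %d, L2 %d)\n", p, frame, l1, l2)
}

On platforms with zero L1 bits the l1 index is always 0 and the map is effectively a single flat L2 array, as the comment above notes.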
In 2017, amd64 hardware added 168 // support for 57 bit addresses; however, currently only Linux 169 // supports this extension and the kernel will never choose an 170 // address above 1<<47 unless mmap is called with a hint 171 // address above 1<<47 (which we never do). 172 // 173 // arm64 hardware (as of ARMv8) limits user addresses to 48 174 // bits, in the range [0, 1<<48). 175 // 176 // ppc64, mips64, and s390x support arbitrary 64 bit addresses 177 // in hardware. On Linux, Go leans on stricter OS limits. Based 178 // on Linux's processor.h, the user address space is limited as 179 // follows on 64-bit architectures: 180 // 181 // Architecture Name Maximum Value (exclusive) 182 // --------------------------------------------------------------------- 183 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses) 184 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses) 185 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses) 186 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses) 187 // s390x TASK_SIZE 1<<64 (64 bit addresses) 188 // 189 // These limits may increase over time, but are currently at 190 // most 48 bits except on s390x. On all architectures, Linux 191 // starts placing mmap'd regions at addresses that are 192 // significantly below 48 bits, so even if it's possible to 193 // exceed Go's 48 bit limit, it's extremely unlikely in 194 // practice. 195 // 196 // On 32-bit platforms, we accept the full 32-bit address 197 // space because doing so is cheap. 198 // mips32 only has access to the low 2GB of virtual memory, so 199 // we further limit it to 31 bits. 200 // 201 // On darwin/arm64, although 64-bit pointers are presumably 202 // available, pointers are truncated to 33 bits. Furthermore, 203 // only the top 4 GiB of the address space are actually available 204 // to the application, but we allow the whole 33 bits anyway for 205 // simplicity. 206 // TODO(mknyszek): Consider limiting it to 32 bits and using 207 // arenaBaseOffset to offset into the top 4 GiB. 208 // 209 // WebAssembly currently has a limit of 4GB linear memory. 210 heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosDarwin*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosDarwin*sys.GoarchArm64 211 212 // maxAlloc is the maximum size of an allocation. On 64-bit, 213 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On 214 // 32-bit, however, this is one less than 1<<32 because the 215 // number of bytes in the address space doesn't actually fit 216 // in a uintptr. 217 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 218 219 // The number of bits in a heap address, the size of heap 220 // arenas, and the L1 and L2 arena map sizes are related by 221 // 222 // (1 << addr bits) = arena size * L1 entries * L2 entries 223 // 224 // Currently, we balance these as follows: 225 // 226 // Platform Addr bits Arena size L1 entries L2 entries 227 // -------------- --------- ---------- ---------- ----------- 228 // */64-bit 48 64MB 1 4M (32MB) 229 // windows/64-bit 48 4MB 64 1M (8MB) 230 // */32-bit 32 4MB 1 1024 (4KB) 231 // */mips(le) 31 4MB 1 512 (2KB) 232 233 // heapArenaBytes is the size of a heap arena. The heap 234 // consists of mappings of size heapArenaBytes, aligned to 235 // heapArenaBytes. The initial heap mapping is one arena. 236 // 237 // This is currently 64MB on 64-bit non-Windows and 4MB on 238 // 32-bit and on Windows. 
We use smaller arenas on Windows
	// because all committed memory is charged to the process,
	// even if it's not touched. Hence, for processes with small
	// heaps, the mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm

	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but it comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows us to keep heapAddrBits at
	// 48. Otherwise, it would have to be 60 in order to handle mmap
	// addresses (in the range 0x0a00000000000000 - 0x0affffffffffffff),
	// but in that case the memory reserved in (s *pageAlloc).init for
	// chunks would cause significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 CPUs.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

// OS memory management abstraction layer
//
// Regions of the address space managed by the runtime may be in one of four
// states at any given time:
// 1) None - Unreserved and unmapped, the default state of any region.
// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
//               Does not count against the process' memory footprint.
// 3) Prepared - Reserved, intended not to be backed by physical memory (though
//               an OS may implement this lazily). Can transition efficiently to
//               Ready. Accessing memory in such a region is undefined (may
//               fault, may give back unexpected zeroes, etc.).
// 4) Ready - may be accessed safely.
//
// This set of states is more than is strictly necessary to support all the
// currently supported platforms. One could get by with just None, Reserved, and
// Ready. However, the Prepared state gives us flexibility for performance
// purposes. For example, on POSIX-y operating systems, Reserved is usually a
// private anonymous mmap'd region with PROT_NONE set, and to transition
// to Ready would require setting PROT_READ|PROT_WRITE. However, the
// underspecification of Prepared lets us use just MADV_FREE to transition from
// Ready to Prepared. Thus, with the Prepared state, we can set the permission
// bits just once early on, and then efficiently tell the OS that it's free to
// take pages away from us when we don't strictly need them.
//
// For each OS there is a common set of helpers defined that transition
// memory regions between these states. The helpers are as follows:
//
// sysAlloc transitions an OS-chosen region of memory from None to Ready.
// More specifically, it obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte. This memory is always immediately available for use.
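The four region states and the helper transitions spelled out in the rest of this comment block can be summarized in a few lines of Go. This is purely illustrative: regionState and the transition table are not runtime types, and the table simply restates the sysAlloc, sysReserve, sysMap, sysUsed, sysUnused, sysFault, and sysFree descriptions that follow.

package main

import "fmt"

// regionState mirrors the four states described above.
type regionState int

const (
	stateNone     regionState = iota // unreserved and unmapped
	stateReserved                    // owned by the runtime, but access would fault
	statePrepared                    // reserved, not necessarily backed by physical memory
	stateReady                       // may be accessed safely
)

func (s regionState) String() string {
	return [...]string{"None", "Reserved", "Prepared", "Ready"}[s]
}

// transition records which helper moves a region between which states.
type transition struct {
	helper   string
	from, to regionState
}

var transitions = []transition{
	{"sysAlloc", stateNone, stateReady},
	{"sysReserve", stateNone, stateReserved},
	{"sysMap", stateReserved, statePrepared},
	{"sysUsed", statePrepared, stateReady},
	{"sysUnused", stateReady, statePrepared},
	{"sysFault", stateReady, stateReserved}, // also Prepared -> Reserved; debugging only
	{"sysFree", stateReady, stateNone},      // accepted from any state
}

func main() {
	for _, t := range transitions {
		fmt.Printf("%-10s %-8s -> %s\n", t.helper, t.from, t.to)
	}
}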
379 // 380 // sysFree transitions a memory region from any state to None. Therefore, it 381 // returns memory unconditionally. It is used if an out-of-memory error has been 382 // detected midway through an allocation or to carve out an aligned section of 383 // the address space. It is okay if sysFree is a no-op only if sysReserve always 384 // returns a memory region aligned to the heap allocator's alignment 385 // restrictions. 386 // 387 // sysReserve transitions a memory region from None to Reserved. It reserves 388 // address space in such a way that it would cause a fatal fault upon access 389 // (either via permissions or not committing the memory). Such a reservation is 390 // thus never backed by physical memory. 391 // If the pointer passed to it is non-nil, the caller wants the 392 // reservation there, but sysReserve can still choose another 393 // location if that one is unavailable. 394 // NOTE: sysReserve returns OS-aligned memory, but the heap allocator 395 // may use larger alignment, so the caller must be careful to realign the 396 // memory obtained by sysReserve. 397 // 398 // sysMap transitions a memory region from Reserved to Prepared. It ensures the 399 // memory region can be efficiently transitioned to Ready. 400 // 401 // sysUsed transitions a memory region from Prepared to Ready. It notifies the 402 // operating system that the memory region is needed and ensures that the region 403 // may be safely accessed. This is typically a no-op on systems that don't have 404 // an explicit commit step and hard over-commit limits, but is critical on 405 // Windows, for example. 406 // 407 // sysUnused transitions a memory region from Ready to Prepared. It notifies the 408 // operating system that the physical pages backing this memory region are no 409 // longer needed and can be reused for other purposes. The contents of a 410 // sysUnused memory region are considered forfeit and the region must not be 411 // accessed again until sysUsed is called. 412 // 413 // sysFault transitions a memory region from Ready or Prepared to Reserved. It 414 // marks a region such that it will always fault if accessed. Used only for 415 // debugging the runtime. 416 417 func mallocinit() { 418 if class_to_size[_TinySizeClass] != _TinySize { 419 throw("bad TinySizeClass") 420 } 421 422 testdefersizes() 423 424 if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 { 425 // heapBits expects modular arithmetic on bitmap 426 // addresses to work. 427 throw("heapArenaBitmapBytes not a power of 2") 428 } 429 430 // Copy class sizes out for statistics table. 431 for i := range class_to_size { 432 memstats.by_size[i].size = uint32(class_to_size[i]) 433 } 434 435 // Check physPageSize. 436 if physPageSize == 0 { 437 // The OS init code failed to fetch the physical page size. 
438 throw("failed to get system page size") 439 } 440 if physPageSize > maxPhysPageSize { 441 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n") 442 throw("bad system page size") 443 } 444 if physPageSize < minPhysPageSize { 445 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 446 throw("bad system page size") 447 } 448 if physPageSize&(physPageSize-1) != 0 { 449 print("system page size (", physPageSize, ") must be a power of 2\n") 450 throw("bad system page size") 451 } 452 if physHugePageSize&(physHugePageSize-1) != 0 { 453 print("system huge page size (", physHugePageSize, ") must be a power of 2\n") 454 throw("bad system huge page size") 455 } 456 if physHugePageSize > maxPhysHugePageSize { 457 // physHugePageSize is greater than the maximum supported huge page size. 458 // Don't throw here, like in the other cases, since a system configured 459 // in this way isn't wrong, we just don't have the code to support them. 460 // Instead, silently set the huge page size to zero. 461 physHugePageSize = 0 462 } 463 if physHugePageSize != 0 { 464 // Since physHugePageSize is a power of 2, it suffices to increase 465 // physHugePageShift until 1<<physHugePageShift == physHugePageSize. 466 for 1<<physHugePageShift != physHugePageSize { 467 physHugePageShift++ 468 } 469 } 470 if pagesPerArena%pagesPerSpanRoot != 0 { 471 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n") 472 throw("bad pagesPerSpanRoot") 473 } 474 if pagesPerArena%pagesPerReclaimerChunk != 0 { 475 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n") 476 throw("bad pagesPerReclaimerChunk") 477 } 478 479 // Initialize the heap. 480 mheap_.init() 481 mcache0 = allocmcache() 482 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas) 483 lockInit(&proflock, lockRankProf) 484 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc) 485 486 // Create initial arena growth hints. 487 if sys.PtrSize == 8 { 488 // On a 64-bit machine, we pick the following hints 489 // because: 490 // 491 // 1. Starting from the middle of the address space 492 // makes it easier to grow out a contiguous range 493 // without running in to some other mapping. 494 // 495 // 2. This makes Go heap addresses more easily 496 // recognizable when debugging. 497 // 498 // 3. Stack scanning in gccgo is still conservative, 499 // so it's important that addresses be distinguishable 500 // from other data. 501 // 502 // Starting at 0x00c0 means that the valid memory addresses 503 // will begin 0x00c0, 0x00c1, ... 504 // In little-endian, that's c0 00, c1 00, ... None of those are valid 505 // UTF-8 sequences, and they are otherwise as far away from 506 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0 507 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors 508 // on OS X during thread allocations. 0x00c0 causes conflicts with 509 // AddressSanitizer which reserves all memory up to 0x0100. 510 // These choices reduce the odds of a conservative garbage collector 511 // not collecting memory because some non-pointer block of memory 512 // had a bit pattern that matched a memory address. 
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		//
		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
		// processes.
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
600 arenaSizes := []uintptr{ 601 512 << 20, 602 256 << 20, 603 128 << 20, 604 } 605 for _, arenaSize := range arenaSizes { 606 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes) 607 if a != nil { 608 mheap_.arena.init(uintptr(a), size) 609 p = mheap_.arena.end // For hint below 610 break 611 } 612 } 613 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 614 hint.addr = p 615 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 616 } 617 } 618 619 // sysAlloc allocates heap arena space for at least n bytes. The 620 // returned pointer is always heapArenaBytes-aligned and backed by 621 // h.arenas metadata. The returned size is always a multiple of 622 // heapArenaBytes. sysAlloc returns nil on failure. 623 // There is no corresponding free function. 624 // 625 // sysAlloc returns a memory region in the Prepared state. This region must 626 // be transitioned to Ready before use. 627 // 628 // h must be locked. 629 func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) { 630 n = alignUp(n, heapArenaBytes) 631 632 // First, try the arena pre-reservation. 633 v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys) 634 if v != nil { 635 size = n 636 goto mapped 637 } 638 639 // Try to grow the heap at a hint address. 640 for h.arenaHints != nil { 641 hint := h.arenaHints 642 p := hint.addr 643 if hint.down { 644 p -= n 645 } 646 if p+n < p { 647 // We can't use this, so don't ask. 648 v = nil 649 } else if arenaIndex(p+n-1) >= 1<<arenaBits { 650 // Outside addressable heap. Can't use. 651 v = nil 652 } else { 653 v = sysReserve(unsafe.Pointer(p), n) 654 } 655 if p == uintptr(v) { 656 // Success. Update the hint. 657 if !hint.down { 658 p += n 659 } 660 hint.addr = p 661 size = n 662 break 663 } 664 // Failed. Discard this hint and try the next. 665 // 666 // TODO: This would be cleaner if sysReserve could be 667 // told to only return the requested address. In 668 // particular, this is already how Windows behaves, so 669 // it would simplify things there. 670 if v != nil { 671 sysFree(v, n, nil) 672 } 673 h.arenaHints = hint.next 674 h.arenaHintAlloc.free(unsafe.Pointer(hint)) 675 } 676 677 if size == 0 { 678 if raceenabled { 679 // The race detector assumes the heap lives in 680 // [0x00c000000000, 0x00e000000000), but we 681 // just ran out of hints in this region. Give 682 // a nice failure. 683 throw("too many address space collisions for -race mode") 684 } 685 686 // All of the hints failed, so we'll take any 687 // (sufficiently aligned) address the kernel will give 688 // us. 689 v, size = sysReserveAligned(nil, n, heapArenaBytes) 690 if v == nil { 691 return nil, 0 692 } 693 694 // Create new hints for extending this region. 695 hint := (*arenaHint)(h.arenaHintAlloc.alloc()) 696 hint.addr, hint.down = uintptr(v), true 697 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 698 hint = (*arenaHint)(h.arenaHintAlloc.alloc()) 699 hint.addr = uintptr(v) + size 700 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 701 } 702 703 // Check for bad pointers or pointers we can't use. 704 { 705 var bad string 706 p := uintptr(v) 707 if p+size < p { 708 bad = "region exceeds uintptr range" 709 } else if arenaIndex(p) >= 1<<arenaBits { 710 bad = "base outside usable address space" 711 } else if arenaIndex(p+size-1) >= 1<<arenaBits { 712 bad = "end outside usable address space" 713 } 714 if bad != "" { 715 // This should be impossible on most architectures, 716 // but it would be really confusing to debug. 
717 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n") 718 throw("memory reservation exceeds address space limit") 719 } 720 } 721 722 if uintptr(v)&(heapArenaBytes-1) != 0 { 723 throw("misrounded allocation in sysAlloc") 724 } 725 726 // Transition from Reserved to Prepared. 727 sysMap(v, size, &memstats.heap_sys) 728 729 mapped: 730 // Create arena metadata. 731 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ { 732 l2 := h.arenas[ri.l1()] 733 if l2 == nil { 734 // Allocate an L2 arena map. 735 l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil)) 736 if l2 == nil { 737 throw("out of memory allocating heap arena map") 738 } 739 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2)) 740 } 741 742 if l2[ri.l2()] != nil { 743 throw("arena already initialized") 744 } 745 var r *heapArena 746 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys)) 747 if r == nil { 748 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys)) 749 if r == nil { 750 throw("out of memory allocating heap arena metadata") 751 } 752 } 753 754 // Add the arena to the arenas list. 755 if len(h.allArenas) == cap(h.allArenas) { 756 size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize 757 if size == 0 { 758 size = physPageSize 759 } 760 newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys)) 761 if newArray == nil { 762 throw("out of memory allocating allArenas") 763 } 764 oldSlice := h.allArenas 765 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)} 766 copy(h.allArenas, oldSlice) 767 // Do not free the old backing array because 768 // there may be concurrent readers. Since we 769 // double the array each time, this can lead 770 // to at most 2x waste. 771 } 772 h.allArenas = h.allArenas[:len(h.allArenas)+1] 773 h.allArenas[len(h.allArenas)-1] = ri 774 775 // Store atomically just in case an object from the 776 // new heap arena becomes visible before the heap lock 777 // is released (which shouldn't happen, but there's 778 // little downside to this). 779 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r)) 780 } 781 782 // Tell the race detector about the new heap memory. 783 if raceenabled { 784 racemapshadow(v, size) 785 } 786 787 return 788 } 789 790 // sysReserveAligned is like sysReserve, but the returned pointer is 791 // aligned to align bytes. It may reserve either n or n+align bytes, 792 // so it returns the size that was reserved. 793 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) { 794 // Since the alignment is rather large in uses of this 795 // function, we're not likely to get it by chance, so we ask 796 // for a larger region and remove the parts we don't need. 797 retries := 0 798 retry: 799 p := uintptr(sysReserve(v, size+align)) 800 switch { 801 case p == 0: 802 return nil, 0 803 case p&(align-1) == 0: 804 // We got lucky and got an aligned region, so we can 805 // use the whole thing. 806 return unsafe.Pointer(p), size + align 807 case GOOS == "windows": 808 // On Windows we can't release pieces of a 809 // reservation, so we release the whole thing and 810 // re-reserve the aligned sub-region. This may race, 811 // so we may have to try again. 
812 sysFree(unsafe.Pointer(p), size+align, nil) 813 p = alignUp(p, align) 814 p2 := sysReserve(unsafe.Pointer(p), size) 815 if p != uintptr(p2) { 816 // Must have raced. Try again. 817 sysFree(p2, size, nil) 818 if retries++; retries == 100 { 819 throw("failed to allocate aligned heap memory; too many retries") 820 } 821 goto retry 822 } 823 // Success. 824 return p2, size 825 default: 826 // Trim off the unaligned parts. 827 pAligned := alignUp(p, align) 828 sysFree(unsafe.Pointer(p), pAligned-p, nil) 829 end := pAligned + size 830 endLen := (p + size + align) - end 831 if endLen > 0 { 832 sysFree(unsafe.Pointer(end), endLen, nil) 833 } 834 return unsafe.Pointer(pAligned), size 835 } 836 } 837 838 // base address for all 0-byte allocations 839 var zerobase uintptr 840 841 // nextFreeFast returns the next free object if one is quickly available. 842 // Otherwise it returns 0. 843 func nextFreeFast(s *mspan) gclinkptr { 844 theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache? 845 if theBit < 64 { 846 result := s.freeindex + uintptr(theBit) 847 if result < s.nelems { 848 freeidx := result + 1 849 if freeidx%64 == 0 && freeidx != s.nelems { 850 return 0 851 } 852 s.allocCache >>= uint(theBit + 1) 853 s.freeindex = freeidx 854 s.allocCount++ 855 return gclinkptr(result*s.elemsize + s.base()) 856 } 857 } 858 return 0 859 } 860 861 // nextFree returns the next free object from the cached span if one is available. 862 // Otherwise it refills the cache with a span with an available object and 863 // returns that object along with a flag indicating that this was a heavy 864 // weight allocation. If it is a heavy weight allocation the caller must 865 // determine whether a new GC cycle needs to be started or if the GC is active 866 // whether this goroutine needs to assist the GC. 867 // 868 // Must run in a non-preemptible context since otherwise the owner of 869 // c could change. 870 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) { 871 s = c.alloc[spc] 872 shouldhelpgc = false 873 freeIndex := s.nextFreeIndex() 874 if freeIndex == s.nelems { 875 // The span is full. 876 if uintptr(s.allocCount) != s.nelems { 877 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 878 throw("s.allocCount != s.nelems && freeIndex == s.nelems") 879 } 880 c.refill(spc) 881 shouldhelpgc = true 882 s = c.alloc[spc] 883 884 freeIndex = s.nextFreeIndex() 885 } 886 887 if freeIndex >= s.nelems { 888 throw("freeIndex is not valid") 889 } 890 891 v = gclinkptr(freeIndex*s.elemsize + s.base()) 892 s.allocCount++ 893 if uintptr(s.allocCount) > s.nelems { 894 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 895 throw("s.allocCount > s.nelems") 896 } 897 return 898 } 899 900 // Allocate an object of size bytes. 901 // Small objects are allocated from the per-P cache's free lists. 902 // Large objects (> 32 kB) are allocated straight from the heap. 903 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 904 if gcphase == _GCmarktermination { 905 throw("mallocgc called with gcphase == _GCmarktermination") 906 } 907 908 if size == 0 { 909 return unsafe.Pointer(&zerobase) 910 } 911 912 if debug.sbrk != 0 { 913 align := uintptr(16) 914 if typ != nil { 915 // TODO(austin): This should be just 916 // align = uintptr(typ.align) 917 // but that's only 4 on 32-bit platforms, 918 // even if there's a uint64 field in typ (see #599). 919 // This causes 64-bit atomic accesses to panic. 
			// Hence, we use stricter alignment that matches
			// the normal allocator better.
			if size&7 == 0 {
				align = 8
			} else if size&3 == 0 {
				align = 4
			} else if size&1 == 0 {
				align = 2
			} else {
				align = 1
			}
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	var c *mcache
	if mp.p != 0 {
		c = mp.p.ptr().mcache
	} else {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
		if c == nil {
			throw("malloc called with no P")
		}
	}
	var span *mspan
	var x unsafe.Pointer
	noscan := typ == nil || typ.ptrdata == 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which gives at most 2x worst-case memory
			// wastage (when all but one subobject are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in that case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark,
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
1017 if size&7 == 0 { 1018 off = alignUp(off, 8) 1019 } else if size&3 == 0 { 1020 off = alignUp(off, 4) 1021 } else if size&1 == 0 { 1022 off = alignUp(off, 2) 1023 } 1024 if off+size <= maxTinySize && c.tiny != 0 { 1025 // The object fits into existing tiny block. 1026 x = unsafe.Pointer(c.tiny + off) 1027 c.tinyoffset = off + size 1028 c.local_tinyallocs++ 1029 mp.mallocing = 0 1030 releasem(mp) 1031 return x 1032 } 1033 // Allocate a new maxTinySize block. 1034 span = c.alloc[tinySpanClass] 1035 v := nextFreeFast(span) 1036 if v == 0 { 1037 v, span, shouldhelpgc = c.nextFree(tinySpanClass) 1038 } 1039 x = unsafe.Pointer(v) 1040 (*[2]uint64)(x)[0] = 0 1041 (*[2]uint64)(x)[1] = 0 1042 // See if we need to replace the existing tiny block with the new one 1043 // based on amount of remaining free space. 1044 if size < c.tinyoffset || c.tiny == 0 { 1045 c.tiny = uintptr(x) 1046 c.tinyoffset = size 1047 } 1048 size = maxTinySize 1049 } else { 1050 var sizeclass uint8 1051 if size <= smallSizeMax-8 { 1052 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)] 1053 } else { 1054 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)] 1055 } 1056 size = uintptr(class_to_size[sizeclass]) 1057 spc := makeSpanClass(sizeclass, noscan) 1058 span = c.alloc[spc] 1059 v := nextFreeFast(span) 1060 if v == 0 { 1061 v, span, shouldhelpgc = c.nextFree(spc) 1062 } 1063 x = unsafe.Pointer(v) 1064 if needzero && span.needzero != 0 { 1065 memclrNoHeapPointers(unsafe.Pointer(v), size) 1066 } 1067 } 1068 } else { 1069 shouldhelpgc = true 1070 systemstack(func() { 1071 span = largeAlloc(size, needzero, noscan) 1072 }) 1073 span.freeindex = 1 1074 span.allocCount = 1 1075 x = unsafe.Pointer(span.base()) 1076 size = span.elemsize 1077 } 1078 1079 var scanSize uintptr 1080 if !noscan { 1081 // If allocating a defer+arg block, now that we've picked a malloc size 1082 // large enough to hold everything, cut the "asked for" size down to 1083 // just the defer header, so that the GC bitmap will record the arg block 1084 // as containing nothing at all (as if it were unused space at the end of 1085 // a malloc block caused by size rounding). 1086 // The defer arg areas are scanned as part of scanstack. 1087 if typ == deferType { 1088 dataSize = unsafe.Sizeof(_defer{}) 1089 } 1090 heapBitsSetType(uintptr(x), size, dataSize, typ) 1091 if dataSize > typ.size { 1092 // Array allocation. If there are any 1093 // pointers, GC has to scan to the last 1094 // element. 1095 if typ.ptrdata != 0 { 1096 scanSize = dataSize - typ.size + typ.ptrdata 1097 } 1098 } else { 1099 scanSize = typ.ptrdata 1100 } 1101 c.local_scan += scanSize 1102 } 1103 1104 // Ensure that the stores above that initialize x to 1105 // type-safe memory and set the heap bits occur before 1106 // the caller can make x observable to the garbage 1107 // collector. Otherwise, on weakly ordered machines, 1108 // the garbage collector could follow a pointer to x, 1109 // but see uninitialized memory or stale heap bits. 1110 publicationBarrier() 1111 1112 // Allocate black during GC. 1113 // All slots hold nil so no scanning is needed. 1114 // This may be racing with GC so do it atomically if there can be 1115 // a race marking the bit. 
1116 if gcphase != _GCoff { 1117 gcmarknewobject(span, uintptr(x), size, scanSize) 1118 } 1119 1120 if raceenabled { 1121 racemalloc(x, size) 1122 } 1123 1124 if msanenabled { 1125 msanmalloc(x, size) 1126 } 1127 1128 mp.mallocing = 0 1129 releasem(mp) 1130 1131 if debug.allocfreetrace != 0 { 1132 tracealloc(x, size, typ) 1133 } 1134 1135 if rate := MemProfileRate; rate > 0 { 1136 if rate != 1 && size < c.next_sample { 1137 c.next_sample -= size 1138 } else { 1139 mp := acquirem() 1140 profilealloc(mp, x, size) 1141 releasem(mp) 1142 } 1143 } 1144 1145 if assistG != nil { 1146 // Account for internal fragmentation in the assist 1147 // debt now that we know it. 1148 assistG.gcAssistBytes -= int64(size - dataSize) 1149 } 1150 1151 if shouldhelpgc { 1152 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1153 gcStart(t) 1154 } 1155 } 1156 1157 return x 1158 } 1159 1160 func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan { 1161 // print("largeAlloc size=", size, "\n") 1162 1163 if size+_PageSize < size { 1164 throw("out of memory") 1165 } 1166 npages := size >> _PageShift 1167 if size&_PageMask != 0 { 1168 npages++ 1169 } 1170 1171 // Deduct credit for this span allocation and sweep if 1172 // necessary. mHeap_Alloc will also sweep npages, so this only 1173 // pays the debt down to npage pages. 1174 deductSweepCredit(npages*_PageSize, npages) 1175 1176 spc := makeSpanClass(0, noscan) 1177 s := mheap_.alloc(npages, spc, needzero) 1178 if s == nil { 1179 throw("out of memory") 1180 } 1181 if go115NewMCentralImpl { 1182 // Put the large span in the mcentral swept list so that it's 1183 // visible to the background sweeper. 1184 mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s) 1185 } 1186 s.limit = s.base() + size 1187 heapBitsForAddr(s.base()).initSpan(s) 1188 return s 1189 } 1190 1191 // implementation of new builtin 1192 // compiler (both frontend and SSA backend) knows the signature 1193 // of this function 1194 func newobject(typ *_type) unsafe.Pointer { 1195 return mallocgc(typ.size, typ, true) 1196 } 1197 1198 //go:linkname reflect_unsafe_New reflect.unsafe_New 1199 func reflect_unsafe_New(typ *_type) unsafe.Pointer { 1200 return mallocgc(typ.size, typ, true) 1201 } 1202 1203 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New 1204 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer { 1205 return mallocgc(typ.size, typ, true) 1206 } 1207 1208 // newarray allocates an array of n elements of type typ. 1209 func newarray(typ *_type, n int) unsafe.Pointer { 1210 if n == 1 { 1211 return mallocgc(typ.size, typ, true) 1212 } 1213 mem, overflow := math.MulUintptr(typ.size, uintptr(n)) 1214 if overflow || mem > maxAlloc || n < 0 { 1215 panic(plainError("runtime: allocation size out of range")) 1216 } 1217 return mallocgc(mem, typ, true) 1218 } 1219 1220 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray 1221 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { 1222 return newarray(typ, n) 1223 } 1224 1225 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { 1226 var c *mcache 1227 if mp.p != 0 { 1228 c = mp.p.ptr().mcache 1229 } else { 1230 c = mcache0 1231 if c == nil { 1232 throw("profilealloc called with no P") 1233 } 1234 } 1235 c.next_sample = nextSample() 1236 mProf_Malloc(x, size) 1237 } 1238 1239 // nextSample returns the next sampling point for heap profiling. 
The goal is 1240 // to sample allocations on average every MemProfileRate bytes, but with a 1241 // completely random distribution over the allocation timeline; this 1242 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson 1243 // processes, the distance between two samples follows the exponential 1244 // distribution (exp(MemProfileRate)), so the best return value is a random 1245 // number taken from an exponential distribution whose mean is MemProfileRate. 1246 func nextSample() uintptr { 1247 if GOOS == "plan9" { 1248 // Plan 9 doesn't support floating point in note handler. 1249 if g := getg(); g == g.m.gsignal { 1250 return nextSampleNoFP() 1251 } 1252 } 1253 1254 return uintptr(fastexprand(MemProfileRate)) 1255 } 1256 1257 // fastexprand returns a random number from an exponential distribution with 1258 // the specified mean. 1259 func fastexprand(mean int) int32 { 1260 // Avoid overflow. Maximum possible step is 1261 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean. 1262 switch { 1263 case mean > 0x7000000: 1264 mean = 0x7000000 1265 case mean == 0: 1266 return 0 1267 } 1268 1269 // Take a random sample of the exponential distribution exp(-mean*x). 1270 // The probability distribution function is mean*exp(-mean*x), so the CDF is 1271 // p = 1 - exp(-mean*x), so 1272 // q = 1 - p == exp(-mean*x) 1273 // log_e(q) = -mean*x 1274 // -log_e(q)/mean = x 1275 // x = -log_e(q) * mean 1276 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency 1277 const randomBitCount = 26 1278 q := fastrand()%(1<<randomBitCount) + 1 1279 qlog := fastlog2(float64(q)) - randomBitCount 1280 if qlog > 0 { 1281 qlog = 0 1282 } 1283 const minusLog2 = -0.6931471805599453 // -ln(2) 1284 return int32(qlog*(minusLog2*float64(mean))) + 1 1285 } 1286 1287 // nextSampleNoFP is similar to nextSample, but uses older, 1288 // simpler code to avoid floating point. 1289 func nextSampleNoFP() uintptr { 1290 // Set first allocation sample size. 1291 rate := MemProfileRate 1292 if rate > 0x3fffffff { // make 2*rate not overflow 1293 rate = 0x3fffffff 1294 } 1295 if rate != 0 { 1296 return uintptr(fastrand() % uint32(2*rate)) 1297 } 1298 return 0 1299 } 1300 1301 type persistentAlloc struct { 1302 base *notInHeap 1303 off uintptr 1304 } 1305 1306 var globalAlloc struct { 1307 mutex 1308 persistentAlloc 1309 } 1310 1311 // persistentChunkSize is the number of bytes we allocate when we grow 1312 // a persistentAlloc. 1313 const persistentChunkSize = 256 << 10 1314 1315 // persistentChunks is a list of all the persistent chunks we have 1316 // allocated. The list is maintained through the first word in the 1317 // persistent chunk. This is updated atomically. 1318 var persistentChunks *notInHeap 1319 1320 // Wrapper around sysAlloc that can allocate small chunks. 1321 // There is no associated free operation. 1322 // Intended for things like function/type/debug-related persistent data. 1323 // If align is 0, uses default align (currently 8). 1324 // The returned memory will be zeroed. 1325 // 1326 // Consider marking persistentalloc'd types go:notinheap. 1327 func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { 1328 var p *notInHeap 1329 systemstack(func() { 1330 p = persistentalloc1(size, align, sysStat) 1331 }) 1332 return unsafe.Pointer(p) 1333 } 1334 1335 // Must run on system stack because stack growth can (re)invoke it. 1336 // See issue 9174. 
1337 //go:systemstack 1338 func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { 1339 const ( 1340 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows 1341 ) 1342 1343 if size == 0 { 1344 throw("persistentalloc: size == 0") 1345 } 1346 if align != 0 { 1347 if align&(align-1) != 0 { 1348 throw("persistentalloc: align is not a power of 2") 1349 } 1350 if align > _PageSize { 1351 throw("persistentalloc: align is too large") 1352 } 1353 } else { 1354 align = 8 1355 } 1356 1357 if size >= maxBlock { 1358 return (*notInHeap)(sysAlloc(size, sysStat)) 1359 } 1360 1361 mp := acquirem() 1362 var persistent *persistentAlloc 1363 if mp != nil && mp.p != 0 { 1364 persistent = &mp.p.ptr().palloc 1365 } else { 1366 lock(&globalAlloc.mutex) 1367 persistent = &globalAlloc.persistentAlloc 1368 } 1369 persistent.off = alignUp(persistent.off, align) 1370 if persistent.off+size > persistentChunkSize || persistent.base == nil { 1371 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys)) 1372 if persistent.base == nil { 1373 if persistent == &globalAlloc.persistentAlloc { 1374 unlock(&globalAlloc.mutex) 1375 } 1376 throw("runtime: cannot allocate memory") 1377 } 1378 1379 // Add the new chunk to the persistentChunks list. 1380 for { 1381 chunks := uintptr(unsafe.Pointer(persistentChunks)) 1382 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks 1383 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { 1384 break 1385 } 1386 } 1387 persistent.off = alignUp(sys.PtrSize, align) 1388 } 1389 p := persistent.base.add(persistent.off) 1390 persistent.off += size 1391 releasem(mp) 1392 if persistent == &globalAlloc.persistentAlloc { 1393 unlock(&globalAlloc.mutex) 1394 } 1395 1396 if sysStat != &memstats.other_sys { 1397 mSysStatInc(sysStat, size) 1398 mSysStatDec(&memstats.other_sys, size) 1399 } 1400 return p 1401 } 1402 1403 // inPersistentAlloc reports whether p points to memory allocated by 1404 // persistentalloc. This must be nosplit because it is called by the 1405 // cgo checker code, which is called by the write barrier code. 1406 //go:nosplit 1407 func inPersistentAlloc(p uintptr) bool { 1408 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks))) 1409 for chunk != 0 { 1410 if p >= chunk && p < chunk+persistentChunkSize { 1411 return true 1412 } 1413 chunk = *(*uintptr)(unsafe.Pointer(chunk)) 1414 } 1415 return false 1416 } 1417 1418 // linearAlloc is a simple linear allocator that pre-reserves a region 1419 // of memory and then maps that region into the Ready state as needed. The 1420 // caller is responsible for locking. 1421 type linearAlloc struct { 1422 next uintptr // next free byte 1423 mapped uintptr // one byte past end of mapped space 1424 end uintptr // end of reserved space 1425 } 1426 1427 func (l *linearAlloc) init(base, size uintptr) { 1428 if base+size < base { 1429 // Chop off the last byte. The runtime isn't prepared 1430 // to deal with situations where the bounds could overflow. 1431 // Leave that memory reserved, though, so we don't map it 1432 // later. 
1433 size -= 1 1434 } 1435 l.next, l.mapped = base, base 1436 l.end = base + size 1437 } 1438 1439 func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { 1440 p := alignUp(l.next, align) 1441 if p+size > l.end { 1442 return nil 1443 } 1444 l.next = p + size 1445 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped { 1446 // Transition from Reserved to Prepared to Ready. 1447 sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat) 1448 sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped) 1449 l.mapped = pEnd 1450 } 1451 return unsafe.Pointer(p) 1452 } 1453 1454 // notInHeap is off-heap memory allocated by a lower-level allocator 1455 // like sysAlloc or persistentAlloc. 1456 // 1457 // In general, it's better to use real types marked as go:notinheap, 1458 // but this serves as a generic type for situations where that isn't 1459 // possible (like in the allocators). 1460 // 1461 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? 1462 // 1463 //go:notinheap 1464 type notInHeap struct{} 1465 1466 func (p *notInHeap) add(bytes uintptr) *notInHeap { 1467 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) 1468 } 1469
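The inversion-sampling math in fastexprand above is straightforward to sanity-check outside the runtime. The sketch below re-derives the same formula with the standard math package (the runtime uses the cheaper fastlog2 approximation over 26 random bits) and confirms that the empirical mean of the samples lands near the configured rate, which is exactly the property nextSample relies on. expSample and the 512 KiB rate are illustrative, not taken from the runtime.

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// expSample draws one inter-sample distance from an exponential
// distribution with the given mean, using the same inversion described
// in fastexprand: if q is uniform in (0, 1], then x = -ln(q) * mean.
func expSample(mean float64, rng *rand.Rand) float64 {
	q := 1 - rng.Float64() // uniform in (0, 1], so ln(q) is defined
	return -math.Log(q) * mean
}

func main() {
	const mean = 512 * 1024 // a hypothetical MemProfileRate of 512 KiB
	rng := rand.New(rand.NewSource(1))

	var sum float64
	const n = 1_000_000
	for i := 0; i < n; i++ {
		sum += expSample(mean, rng)
	}
	// The empirical mean should land close to the configured rate.
	fmt.Printf("target mean %d, empirical mean %.0f\n", int(mean), sum/n)
}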