Package runtime
Constants
const ( c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289) c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503) )
type algorithms - known to compiler
const ( alg_NOEQ = iota alg_MEM0 alg_MEM8 alg_MEM16 alg_MEM32 alg_MEM64 alg_MEM128 alg_STRING alg_INTER alg_NILINTER alg_FLOAT32 alg_FLOAT64 alg_CPLX64 alg_CPLX128 alg_max )
const ( maxAlign = 8 hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1)) debugChan = false )
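The hchanSize expression above uses a common align-up idiom. A minimal standalone sketch of that arithmetic (the inputs are made up for illustration):

    package main

    import "fmt"

    // alignUp rounds x up to the next multiple of align (a power of two),
    // using the same x + (-x & (align-1)) trick as the hchanSize expression.
    func alignUp(x, align int) int {
        return x + (-x & (align - 1))
    }

    func main() {
        fmt.Println(alignUp(93, 8)) // 96: rounded up to maxAlign
        fmt.Println(alignUp(96, 8)) // 96: already aligned
    }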
Offsets into internal/cpu records for use in assembly.
const ( offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX) offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2) offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS) offsetX86HasSSE2 = unsafe.Offsetof(cpu.X86.HasSSE2) offsetARMHasIDIVA = unsafe.Offsetof(cpu.ARM.HasIDIVA) offsetMIPS64XHasMSA = unsafe.Offsetof(cpu.MIPS64X.HasMSA) )
const ( debugCallSystemStack = "executing on Go runtime stack" debugCallUnknownFunc = "call from unknown function" debugCallRuntime = "call from within the Go runtime" debugCallUnsafePoint = "call not at safe point" )
const ( debugLogUnknown = 1 + iota debugLogBoolTrue debugLogBoolFalse debugLogInt debugLogUint debugLogHex debugLogPtr debugLogString debugLogConstString debugLogStringOverflow debugLogPC debugLogTraceback )
const ( // debugLogHeaderSize is the number of bytes in the framing // header of every dlog record. debugLogHeaderSize = 2 // debugLogSyncSize is the number of bytes in a sync record. debugLogSyncSize = debugLogHeaderSize + 2*8 )
const ( _EINTR = 0x4 _EAGAIN = 0xb _ENOMEM = 0xc _ENOSYS = 0x26 _PROT_NONE = 0x0 _PROT_READ = 0x1 _PROT_WRITE = 0x2 _PROT_EXEC = 0x4 _MAP_ANON = 0x20 _MAP_PRIVATE = 0x2 _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf _SA_RESTART = 0x10000000 _SA_ONSTACK = 0x8000000 _SA_RESTORER = 0x4000000 _SA_SIGINFO = 0x4 _SIGHUP = 0x1 _SIGINT = 0x2 _SIGQUIT = 0x3 _SIGILL = 0x4 _SIGTRAP = 0x5 _SIGABRT = 0x6 _SIGBUS = 0x7 _SIGFPE = 0x8 _SIGKILL = 0x9 _SIGUSR1 = 0xa _SIGSEGV = 0xb _SIGUSR2 = 0xc _SIGPIPE = 0xd _SIGALRM = 0xe _SIGSTKFLT = 0x10 _SIGCHLD = 0x11 _SIGCONT = 0x12 _SIGSTOP = 0x13 _SIGTSTP = 0x14 _SIGTTIN = 0x15 _SIGTTOU = 0x16 _SIGURG = 0x17 _SIGXCPU = 0x18 _SIGXFSZ = 0x19 _SIGVTALRM = 0x1a _SIGPROF = 0x1b _SIGWINCH = 0x1c _SIGIO = 0x1d _SIGPWR = 0x1e _SIGSYS = 0x1f _FPE_INTDIV = 0x1 _FPE_INTOVF = 0x2 _FPE_FLTDIV = 0x3 _FPE_FLTOVF = 0x4 _FPE_FLTUND = 0x5 _FPE_FLTRES = 0x6 _FPE_FLTINV = 0x7 _FPE_FLTSUB = 0x8 _BUS_ADRALN = 0x1 _BUS_ADRERR = 0x2 _BUS_OBJERR = 0x3 _SEGV_MAPERR = 0x1 _SEGV_ACCERR = 0x2 _ITIMER_REAL = 0x0 _ITIMER_VIRTUAL = 0x1 _ITIMER_PROF = 0x2 _EPOLLIN = 0x1 _EPOLLOUT = 0x4 _EPOLLERR = 0x8 _EPOLLHUP = 0x10 _EPOLLRDHUP = 0x2000 _EPOLLET = 0x80000000 _EPOLL_CLOEXEC = 0x80000 _EPOLL_CTL_ADD = 0x1 _EPOLL_CTL_DEL = 0x2 _EPOLL_CTL_MOD = 0x3 _AF_UNIX = 0x1 _SOCK_DGRAM = 0x2 )
const ( _O_RDONLY = 0x0 _O_NONBLOCK = 0x800 _O_CLOEXEC = 0x80000 )
const ( // Constants for multiplication: four random odd 64-bit numbers. m1 = 16877499708836156737 m2 = 2820277070424839065 m3 = 9497967016996688599 m4 = 15839092249703872147 )
const ( fieldKindEol = 0 fieldKindPtr = 1 fieldKindIface = 2 fieldKindEface = 3 tagEOF = 0 tagObject = 1 tagOtherRoot = 2 tagType = 3 tagGoroutine = 4 tagStackFrame = 5 tagParams = 6 tagFinalizer = 7 tagItab = 8 tagOSThread = 9 tagMemStats = 10 tagQueuedFinalizer = 11 tagData = 12 tagBSS = 13 tagDefer = 14 tagPanic = 15 tagMemProf = 16 tagAllocSample = 17 )
Cache of types that have been serialized already. We use a type's hash field to pick a bucket. Inside a bucket, we keep a list of types that have been serialized so far, most recently used first. Note: when a bucket overflows we may end up serializing a type more than once. That's ok.
const ( typeCacheBuckets = 256 typeCacheAssoc = 4 )
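A standalone sketch of the bucket scheme described above, using hypothetical non-zero uint32 type IDs in place of the real *_type pointers; the move-to-front and overflow behavior follow the description, not the actual heap dump code:

    package main

    import "fmt"

    const (
        typeCacheBuckets = 256 // power of two, so a mask picks the bucket
        typeCacheAssoc   = 4
    )

    // typeCacheBucket is a stand-in for the per-bucket MRU list of serialized
    // types. IDs are assumed non-zero so the zero-initialized cache reads as empty.
    type typeCacheBucket struct {
        t [typeCacheAssoc]uint32
    }

    var typeCache [typeCacheBuckets]typeCacheBucket

    // seen reports whether id was already cached, inserting it most-recently-used
    // first if not. When a bucket is full, the least recently used entry falls off
    // the end, which is why a type may occasionally be serialized more than once.
    func seen(hash, id uint32) bool {
        b := &typeCache[hash&(typeCacheBuckets-1)]
        for i, v := range b.t {
            if v == id {
                copy(b.t[1:i+1], b.t[:i]) // move to front
                b.t[0] = id
                return true
            }
        }
        copy(b.t[1:], b.t[:typeCacheAssoc-1]) // shift; the LRU entry is dropped
        b.t[0] = id
        return false
    }

    func main() {
        fmt.Println(seen(0x1234, 7)) // false: not cached yet, would be serialized
        fmt.Println(seen(0x1234, 7)) // true: cached, serialization skipped
    }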
const ( // addrBits is the number of bits needed to represent a virtual address. // // See heapAddrBits for a table of address space sizes on // various architectures. 48 bits is enough for all // architectures except s390x. // // On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64. // We shift the address left 16 to eliminate the sign extended part and make // room in the bottom for the count. // // On s390x, virtual addresses are 64-bit. There's not much we // can do about this, so we just hope that the kernel doesn't // get to really high addresses and panic if it does. addrBits = 48 // In addition to the 16 bits taken from the top, we can take 3 from the // bottom, because node must be pointer-aligned, giving a total of 19 bits // of count. cntBits = 64 - addrBits + 3 // On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit // offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA) // are available for mmap. // We assume all lfnode addresses are from memory allocated with mmap. // We use one bit to distinguish between the two ranges. aixAddrBits = 57 aixCntBits = 64 - aixAddrBits + 3 )
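A hedged sketch of how such a packed pointer/count word can be built and taken apart on a 48-bit-address machine, using plain integers rather than the runtime's lfnode type; the shift amounts come from the constants above, and the sample address is made up:

    package main

    import "fmt"

    const (
        addrBits = 48
        cntBits  = 64 - addrBits + 3 // 19 bits of counter
    )

    // pack squeezes a pointer-aligned 48-bit address and a 19-bit count into one
    // uint64: the address goes in the top bits, the count in the bottom.
    func pack(addr, cnt uint64) uint64 {
        return addr<<(64-addrBits) | cnt&(1<<cntBits-1)
    }

    // unpack recovers the address: the arithmetic shift right by cntBits restores
    // any sign-extended top bits, and the <<3 restores pointer alignment.
    func unpack(val uint64) uint64 {
        return uint64(int64(val) >> cntBits << 3)
    }

    func main() {
        addr := uint64(0x00007f0000001230) // a made-up, pointer-aligned address
        v := pack(addr, 5)
        fmt.Printf("%#x\n", unpack(v)) // 0x7f0000001230
    }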
const ( mutex_unlocked = 0 mutex_locked = 1 mutex_sleeping = 2 active_spin = 4 active_spin_cnt = 30 passive_spin = 1 )
const ( debugMalloc = false maxTinySize = _TinySize tinySizeClass = _TinySizeClass maxSmallSize = _MaxSmallSize pageShift = _PageShift pageSize = _PageSize pageMask = _PageMask // By construction, single page spans of the smallest object class // have the most objects per span. maxObjsPerSpan = pageSize / 8 concurrentSweep = _ConcurrentSweep _PageSize = 1 << _PageShift _PageMask = _PageSize - 1 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems _64bit = 1 << (^uintptr(0) >> 63) / 2 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. _TinySize = 16 _TinySizeClass = int8(2) _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc // Per-P, per order stack segment cache size. _StackCacheSize = 32 * 1024 // Number of orders that get caching. Order 0 is FixedStack // and each successive order is twice as large. // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks // will be allocated directly. // Since FixedStack is different on different systems, we // must vary NumStackOrders to keep the same maximum cached size. // OS | FixedStack | NumStackOrders // -----------------+------------+--------------- // linux/darwin/bsd | 2KB | 4 // windows/32 | 4KB | 3 // windows/64 | 8KB | 2 // plan9 | 4KB | 3 _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9 // heapAddrBits is the number of bits in a heap address. On // amd64, addresses are sign-extended beyond heapAddrBits. On // other arches, they are zero-extended. // // On most 64-bit platforms, we limit this to 48 bits based on a // combination of hardware and OS limitations. // // amd64 hardware limits addresses to 48 bits, sign-extended // to 64 bits. Addresses where the top 16 bits are not either // all 0 or all 1 are "non-canonical" and invalid. Because of // these "negative" addresses, we offset addresses by 1<<47 // (arenaBaseOffset) on amd64 before computing indexes into // the heap arenas index. In 2017, amd64 hardware added // support for 57 bit addresses; however, currently only Linux // supports this extension and the kernel will never choose an // address above 1<<47 unless mmap is called with a hint // address above 1<<47 (which we never do). // // arm64 hardware (as of ARMv8) limits user addresses to 48 // bits, in the range [0, 1<<48). // // ppc64, mips64, and s390x support arbitrary 64 bit addresses // in hardware. On Linux, Go leans on stricter OS limits. Based // on Linux's processor.h, the user address space is limited as // follows on 64-bit architectures: // // Architecture Name Maximum Value (exclusive) // --------------------------------------------------------------------- // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses) // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses) // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses) // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses) // s390x TASK_SIZE 1<<64 (64 bit addresses) // // These limits may increase over time, but are currently at // most 48 bits except on s390x. On all architectures, Linux // starts placing mmap'd regions at addresses that are // significantly below 48 bits, so even if it's possible to // exceed Go's 48 bit limit, it's extremely unlikely in // practice. // // On 32-bit platforms, we accept the full 32-bit address // space because doing so is cheap. // mips32 only has access to the low 2GB of virtual memory, so // we further limit it to 31 bits. // // On darwin/arm64, although 64-bit pointers are presumably // available, pointers are truncated to 33 bits. 
Furthermore, // only the top 4 GiB of the address space are actually available // to the application, but we allow the whole 33 bits anyway for // simplicity. // TODO(mknyszek): Consider limiting it to 32 bits and using // arenaBaseOffset to offset into the top 4 GiB. // // WebAssembly currently has a limit of 4GB linear memory. heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosDarwin*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosDarwin*sys.GoarchArm64 // maxAlloc is the maximum size of an allocation. On 64-bit, // it's theoretically possible to allocate 1<<heapAddrBits bytes. On // 32-bit, however, this is one less than 1<<32 because the // number of bytes in the address space doesn't actually fit // in a uintptr. maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 // heapArenaBytes is the size of a heap arena. The heap // consists of mappings of size heapArenaBytes, aligned to // heapArenaBytes. The initial heap mapping is one arena. // // This is currently 64MB on 64-bit non-Windows and 4MB on // 32-bit and on Windows. We use smaller arenas on Windows // because all committed memory is charged to the process, // even if it's not touched. Hence, for processes with small // heaps, the mapped arena space needs to be commensurate. // This is particularly important with the race detector, // since it significantly amplifies the cost of committed // memory. heapArenaBytes = 1 << logHeapArenaBytes // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity, // prefer using heapArenaBytes where possible (we need the // constant to compute some other constants). logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm // heapArenaBitmapBytes is the size of each heap arena's bitmap. heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2) pagesPerArena = heapArenaBytes / pageSize // arenaL1Bits is the number of bits of the arena number // covered by the first level arena map. // // This number should be small, since the first level arena // map requires PtrSize*(1<<arenaL1Bits) of space in the // binary's BSS. It can be zero, in which case the first level // index is effectively unused. There is a performance benefit // to this, since the generated code can be more efficient, // but comes at the cost of having a large L2 mapping. // // We use the L1 map on 64-bit Windows because the arena size // is small, but the address space is still 48 bits, and // there's a high cost to having a large L2. arenaL1Bits = 6 * (_64bit * sys.GoosWindows) // arenaL2Bits is the number of bits of the arena number // covered by the second level arena index. // // The size of each arena map allocation is proportional to // 1<<arenaL2Bits, so it's important that this not be too // large. 48 bits leads to 32MB arena index allocations, which // is about the practical threshold. arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits // arenaL1Shift is the number of bits to shift an arena frame // number by to compute an index into the first level arena map. arenaL1Shift = arenaL2Bits // arenaBits is the total bits in a combined arena map index. // This is split between the index into the L1 arena map and // the L2 arena map. arenaBits = arenaL1Bits + arenaL2Bits // arenaBaseOffset is the pointer value that corresponds to // index 0 in the heap arena map. // // On amd64, the address space is 48 bits, sign extended to 64 // bits. 
This offset lets us handle "negative" addresses (or // high addresses if viewed as unsigned). // // On aix/ppc64, this offset allows heapAddrBits to stay at // 48. Otherwise, it would need to be 60 in order to handle mmap addresses // (in the range 0x0a00000000000000 - 0x0afffffffffffff). But in that // case, the memory reserved in (s *pageAlloc).init for chunks // causes significant slowdowns. // // On other platforms, the user address space is contiguous // and starts at 0, so no offset is necessary. arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix // A typed version of this constant that will make it into DWARF (for viewcore). arenaBaseOffsetUintptr = uintptr(arenaBaseOffset) // Max number of threads to run garbage collection. // 2, 3, and 4 are all plausible maximums depending // on the hardware details of the machine. The garbage // collector scales well to 32 cpus. _MaxGcproc = 32 // minLegalPointer is the smallest possible legal pointer. // This is the smallest possible architectural page size, // since we assume that the first page is never mapped. // // This should agree with minZeroPage in the compiler. minLegalPointer uintptr = 4096 )
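Worked example (a sketch, not the runtime's arenaIndex helper): since arenaBaseOffset is described above as the pointer value corresponding to index 0, an arena index is the wrapping distance from that base divided by the arena size. The linux/amd64 values (heapAddrBits = 48, 64MB arenas, arenaL1Bits = 0) and the sample address are assumptions for illustration:

    package main

    import "fmt"

    func main() {
        const (
            logHeapArenaBytes         = 26 // 64MB arenas
            heapArenaBytes            = 1 << logHeapArenaBytes
            arenaL2Bits               = 48 - logHeapArenaBytes // arenaL1Bits is 0 here
            arenaBaseOffset   uintptr = 0xffff800000000000
        )
        p := uintptr(0x0000_00c0_0000_0000) // a typical-looking Go heap address
        ai := (p - arenaBaseOffset) / heapArenaBytes // uintptr arithmetic wraps
        fmt.Printf("arena index %#x of %#x\n", ai, 1<<arenaL2Bits)
    }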
const ( // Maximum number of key/elem pairs a bucket can hold. bucketCntBits = 3 bucketCnt = 1 << bucketCntBits // Maximum average load of a bucket that triggers growth is 6.5. // Represent as loadFactorNum/loadFactorDen, to allow integer math. loadFactorNum = 13 loadFactorDen = 2 // Maximum key or elem size to keep inline (instead of mallocing per element). // Must fit in a uint8. // Fast versions cannot handle big elems - the cutoff size for // fast versions in cmd/compile/internal/gc/walk.go must be at most this elem. maxKeySize = 128 maxElemSize = 128 // data offset should be the size of the bmap struct, but needs to be // aligned correctly. For amd64p32 this means 64-bit alignment // even though pointers are 32 bit. dataOffset = unsafe.Offsetof(struct { b bmap v int64 }{}.v) // Possible tophash values. We reserve a few possibilities for special marks. // Each bucket (including its overflow buckets, if any) will have either all or none of its // entries in the evacuated* states (except during the evacuate() method, which only happens // during map writes and thus no one else can observe the map during that time). emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows. emptyOne = 1 // this cell is empty evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table. evacuatedY = 3 // same as above, but evacuated to second half of larger table. evacuatedEmpty = 4 // cell is empty, bucket is evacuated. minTopHash = 5 // minimum tophash for a normal filled cell. // flags iterator = 1 // there may be an iterator using buckets oldIterator = 2 // there may be an iterator using oldbuckets hashWriting = 4 // a goroutine is writing to the map sameSizeGrow = 8 // the current map growth is to a new map of the same size // sentinel bucket ID for iterator checks noCheck = 1<<(8*sys.PtrSize) - 1 )
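A sketch of the load-factor test these constants support: a map with 1<<B buckets grows once the item count exceeds 6.5 per bucket on average, checked with integer math. This illustrates the arithmetic, not the map implementation itself:

    package main

    import "fmt"

    const (
        bucketCnt     = 8 // 1 << bucketCntBits
        loadFactorNum = 13
        loadFactorDen = 2
    )

    // overLoadFactor reports whether count items spread over 1<<B buckets exceed
    // the 6.5 average load factor, using the integer form count*den > num*2^B.
    func overLoadFactor(count int, B uint8) bool {
        return count > bucketCnt && uint64(count)*loadFactorDen > loadFactorNum*(uint64(1)<<B)
    }

    func main() {
        fmt.Println(overLoadFactor(52, 3)) // false: 52 <= 6.5 * 8
        fmt.Println(overLoadFactor(53, 3)) // true:  53 >  6.5 * 8
    }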
const ( bitPointer = 1 << 0 bitScan = 1 << 4 heapBitsShift = 1 // shift offset between successive bitPointer or bitScan entries wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte // all scan/pointer bits in a byte bitScanAll = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift) bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift) )
const ( _EACCES = 13 _EINVAL = 22 )
const ( _DebugGC = 0 _ConcurrentSweep = true _FinBlockSize = 4 * 1024 // debugScanConservative enables debug logging for stack // frames that are scanned conservatively. debugScanConservative = false // sweepMinHeapDistance is a lower bound on the heap distance // (in bytes) reserved for concurrent sweeping between GC // cycles. sweepMinHeapDistance = 1024 * 1024 )
const ( _GCoff = iota // GC not running; sweeping in background, write barrier disabled _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED )
const ( fixedRootFinalizers = iota fixedRootFreeGStacks fixedRootCount // rootBlockBytes is the number of bytes to scan per data or // BSS root. rootBlockBytes = 256 << 10 // maxObletBytes is the maximum bytes of an object to scan at // once. Larger objects will be split up into "oblets" of at // most this size. Since we can scan 1–2 MB/ms, 128 KB bounds // scan preemption at ~100 µs. // // This must be > _MaxSmallSize so that the object base is the // span base. maxObletBytes = 128 << 10 // drainCheckThreshold specifies how many units of work to do // between self-preemption checks in gcDrain. Assuming a scan // rate of 1 MB/ms, this is ~100 µs. Lower values have higher // overhead in the scan loop (the scheduler check may perform // a syscall, so its overhead is nontrivial). Higher values // make the system less responsive to incoming work. drainCheckThreshold = 100000 // pagesPerSpanRoot indicates how many pages to scan from a span root // at a time. Used by special root marking. // // Higher values improve throughput by increasing locality, but // increase the minimum latency of a marking operation. // // Must be a multiple of the pageInUse bitmap element size and // must also evenly divide pagesPerArena. pagesPerSpanRoot = 512 // go115NewMarkrootSpans is a feature flag that indicates whether // to use the new bitmap-based markrootSpans implementation. go115NewMarkrootSpans = true )
const ( // The background scavenger is paced according to these parameters. // // scavengePercent represents the portion of mutator time we're willing // to spend on scavenging in percent. scavengePercent = 1 // 1% // retainExtraPercent represents the amount of memory over the heap goal // that the scavenger should keep as a buffer space for the allocator. // // The purpose of maintaining this overhead is to have a greater pool of // unscavenged memory available for allocation (since using scavenged memory // incurs an additional cost), to account for heap fragmentation and // the ever-changing layout of the heap. retainExtraPercent = 10 // maxPagesPerPhysPage is the maximum number of supported runtime pages per // physical page, based on maxPhysPageSize. maxPagesPerPhysPage = maxPhysPageSize / pageSize // scavengeCostRatio is the approximate ratio between the costs of using previously // scavenged memory and scavenging memory. // // For most systems the cost of scavenging greatly outweighs the costs // associated with using scavenged memory, making this constant 0. On other systems // (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial. // // This ratio is used as part of multiplicative factor to help the scavenger account // for the additional costs of using scavenged memory in its pacing. scavengeCostRatio = 0.7 * sys.GoosDarwin // scavengeReservationShards determines the amount of memory the scavenger // should reserve for scavenging at a time. Specifically, the amount of // memory reserved is (heap size in bytes) / scavengeReservationShards. scavengeReservationShards = 64 )
const ( gcSweepBlockEntries = 512 // 4KB on 64-bit gcSweepBufInitSpineCap = 256 // Enough for 1GB heap on 64-bit )
const ( _WorkbufSize = 2048 // in bytes; larger values result in less contention // workbufAlloc is the number of bytes to allocate at a time // for new workbufs. This must be a multiple of pageSize and // should be a multiple of _WorkbufSize. // // Larger values reduce workbuf allocation overhead. Smaller // values reduce heap fragmentation. workbufAlloc = 32 << 10 )
const ( // minPhysPageSize is a lower-bound on the physical page size. The // true physical page size may be larger than this. In contrast, // sys.PhysPageSize is an upper-bound on the physical page size. minPhysPageSize = 4096 // maxPhysPageSize is the maximum page size the runtime supports. maxPhysPageSize = 512 << 10 // maxPhysHugePageSize sets an upper-bound on the maximum huge page size // that the runtime supports. maxPhysHugePageSize = pallocChunkBytes // pagesPerReclaimerChunk indicates how many pages to scan from the // pageInUse bitmap at a time. Used by the page reclaimer. // // Higher values reduce contention on scanning indexes (such as // h.reclaimIndex), but increase the minimum latency of the // operation. // // The time required to scan this many pages can vary a lot depending // on how many spans are actually freed. Experimentally, it can // scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only // free spans at ~32 MB/ms. Using 512 pages bounds this at // roughly 100µs. // // Must be a multiple of the pageInUse bitmap element size and // must also evenly divide pagesPerArena. pagesPerReclaimerChunk = 512 // go115NewMCentralImpl is a feature flag for the new mcentral implementation. // // This flag depends on go115NewMarkrootSpans because the new mcentral // implementation requires that markroot spans no longer rely on mgcsweepbufs. // The definition of this flag helps ensure that if there's a problem with // the new markroot spans implementation and it gets turned off, that the new // mcentral implementation also gets turned off so the runtime isn't broken. go115NewMCentralImpl = true && go115NewMarkrootSpans )
const ( numSpanClasses = _NumSizeClasses << 1 tinySpanClass = spanClass(tinySizeClass<<1 | 1) )
const ( _KindSpecialFinalizer = 1 _KindSpecialProfile = 2 )
const ( // The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider // in the bitmap at once. pallocChunkPages = 1 << logPallocChunkPages pallocChunkBytes = pallocChunkPages * pageSize logPallocChunkPages = 9 logPallocChunkBytes = logPallocChunkPages + pageShift // The number of radix bits for each level. // // The value of 3 is chosen such that the block of summaries we need to scan at // each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is // close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree // levels perfectly into the 21-bit pallocBits summary field at the root level. // // The following equation explains how each of the constants relate: // summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits // // summaryLevels is an architecture-dependent value defined in mpagealloc_*.go. summaryLevelBits = 3 summaryL0Bits = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits // pallocChunksL2Bits is the number of bits of the chunk index number // covered by the second level of the chunks map. // // See (*pageAlloc).chunks for more details. Update the documentation // there should this change. pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits pallocChunksL1Shift = pallocChunksL2Bits )
const ( pallocSumBytes = unsafe.Sizeof(pallocSum(0)) // maxPackedValue is the maximum value that any of the three fields in // the pallocSum may take on. maxPackedValue = 1 << logMaxPackedValue logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits freeChunkSum = pallocSum(uint64(pallocChunkPages) | uint64(pallocChunkPages<<logMaxPackedValue) | uint64(pallocChunkPages<<(2*logMaxPackedValue))) )
const ( // The number of levels in the radix tree. summaryLevels = 5 // Constants for testing. pageAlloc32Bit = 0 pageAlloc64Bit = 1 // Number of bits needed to represent all indices into the L1 of the // chunks map. // // See (*pageAlloc).chunks for more details. Update the documentation // there should this number change. pallocChunksL1Bits = 13 )
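Worked check of the equation quoted above, assuming the 64-bit values heapAddrBits = 48, pageShift = 13, and summaryLevels = 5:

    package main

    import "fmt"

    func main() {
        // summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
        const (
            heapAddrBits        = 48
            pageShift           = 13
            logPallocChunkPages = 9
            logPallocChunkBytes = logPallocChunkPages + pageShift // 22
            summaryLevels       = 5
            summaryLevelBits    = 3

            summaryL0Bits     = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
            logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits
        )
        fmt.Println(summaryL0Bits)     // 14
        fmt.Println(logMaxPackedValue) // 21, the "21-bit" summary field mentioned above
    }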
const ( spanSetBlockEntries = 512 // 4KB on 64-bit spanSetInitSpineCap = 256 // Enough for 1GB heap on 64-bit )
const ( // wbBufEntries is the number of write barriers between // flushes of the write barrier buffer. // // This trades latency for throughput amortization. Higher // values amortize flushing overhead more, but increase the // latency of flushing. Higher values also increase the cache // footprint of the buffer. // // TODO: What is the latency cost of this? Tune this value. wbBufEntries = 256 // wbBufEntryPointers is the number of pointers added to the // buffer by each write barrier. wbBufEntryPointers = 2 )
Error codes returned by runtime_pollReset and runtime_pollWait. These must match the values in internal/poll/fd_poll_runtime.go.
const ( pollNoError = 0 // no error pollErrClosing = 1 // descriptor is closed pollErrTimeout = 2 // I/O timeout pollErrNotPollable = 3 // general error polling descriptor )
pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer goroutines respectively. A semaphore can be in the following states:

pdReady - an io readiness notification is pending; a goroutine consumes the notification by changing the state to nil.

pdWait - a goroutine prepares to park on the semaphore, but is not yet parked; the goroutine commits to park by changing the state to a G pointer, or, alternatively, a concurrent io notification changes the state to pdReady, or, alternatively, a concurrent timeout/close changes the state to nil.

G pointer - the goroutine is blocked on the semaphore; an io notification or a timeout/close changes the state to pdReady or nil respectively and unparks the goroutine.

nil - none of the above.
const ( pdReady uintptr = 1 pdWait uintptr = 2 )
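A simplified, standalone model of the reader side of this protocol using sync/atomic. It only shows the first two transitions (consume pdReady, or announce pdWait); the actual parking and the G-pointer state are omitted, and this is not the runtime's netpoll code:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // States of one modeled pollDesc semaphore (rg or wg).
    const (
        pdNil   uintptr = 0
        pdReady uintptr = 1
        pdWait  uintptr = 2
    )

    // consumeOrPrepareWait either consumes a pending readiness notification
    // (pdReady -> nil) or announces intent to park (nil -> pdWait). The real
    // runtime would then park and publish its G pointer in place of pdWait.
    func consumeOrPrepareWait(sema *uintptr) (ready bool) {
        for {
            switch atomic.LoadUintptr(sema) {
            case pdReady:
                if atomic.CompareAndSwapUintptr(sema, pdReady, pdNil) {
                    return true
                }
            case pdNil:
                if atomic.CompareAndSwapUintptr(sema, pdNil, pdWait) {
                    return false
                }
            default:
                panic("unexpected semaphore state")
            }
        }
    }

    func main() {
        rg := pdReady
        fmt.Println(consumeOrPrepareWait(&rg)) // true: notification consumed
        fmt.Println(consumeOrPrepareWait(&rg)) // false: now committed to waiting
    }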
const ( _FUTEX_PRIVATE_FLAG = 128 _FUTEX_WAIT_PRIVATE = 0 | _FUTEX_PRIVATE_FLAG _FUTEX_WAKE_PRIVATE = 1 | _FUTEX_PRIVATE_FLAG )
Clone, the Linux rfork.
const ( _CLONE_VM = 0x100 _CLONE_FS = 0x200 _CLONE_FILES = 0x400 _CLONE_SIGHAND = 0x800 _CLONE_PTRACE = 0x2000 _CLONE_VFORK = 0x4000 _CLONE_PARENT = 0x8000 _CLONE_THREAD = 0x10000 _CLONE_NEWNS = 0x20000 _CLONE_SYSVSEM = 0x40000 _CLONE_SETTLS = 0x80000 _CLONE_PARENT_SETTID = 0x100000 _CLONE_CHILD_CLEARTID = 0x200000 _CLONE_UNTRACED = 0x800000 _CLONE_CHILD_SETTID = 0x1000000 _CLONE_STOPPED = 0x2000000 _CLONE_NEWUTS = 0x4000000 _CLONE_NEWIPC = 0x8000000 cloneFlags = _CLONE_VM | _CLONE_FS | _CLONE_FILES | _CLONE_SIGHAND | _CLONE_SYSVSEM | _CLONE_THREAD /* revisit - okay for now */ )
const ( _AT_NULL = 0 // End of vector _AT_PAGESZ = 6 // System physical page size _AT_HWCAP = 16 // hardware capability bit vector _AT_RANDOM = 25 // introduced in 2.6.29 _AT_HWCAP2 = 26 // hardware capability bit vector 2 )
const ( _SS_DISABLE = 2 _NSIG = 65 _SI_USER = 0 _SIG_BLOCK = 0 _SIG_UNBLOCK = 1 _SIG_SETMASK = 2 )
const ( deferHeaderSize = unsafe.Sizeof(_defer{}) minDeferAlloc = (deferHeaderSize + 15) &^ 15 minDeferArgs = minDeferAlloc - deferHeaderSize )
Keep a cached value to make gotraceback fast, since we call it on every call to gentraceback. The cached value is a uint32 in which the low bits are the "crash" and "all" settings and the remaining bits are the traceback value (0 off, 1 on, 2 include system).
const ( tracebackCrash = 1 << iota tracebackAll tracebackShift = iota )
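A small sketch of unpacking such a cached value, following the layout described above; the example input assumes the GOTRACEBACK=crash packing (level 2 with both flags set), which is an illustration rather than a guarantee:

    package main

    import "fmt"

    const (
        tracebackCrash = 1 << iota
        tracebackAll
        tracebackShift = iota
    )

    // decode unpacks a cached gotraceback value: the two low bits are the crash
    // and all flags, the remaining bits are the traceback level.
    func decode(cached uint32) (level int32, all, crash bool) {
        level = int32(cached >> tracebackShift)
        all = cached&tracebackAll != 0
        crash = cached&tracebackCrash != 0
        return
    }

    func main() {
        fmt.Println(decode(2<<tracebackShift | tracebackAll | tracebackCrash)) // 2 true true
    }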
defined constants
const ( // _Gidle means this goroutine was just allocated and has not // yet been initialized. _Gidle = iota // 0 // _Grunnable means this goroutine is on a run queue. It is // not currently executing user code. The stack is not owned. _Grunnable // 1 // _Grunning means this goroutine may execute user code. The // stack is owned by this goroutine. It is not on a run queue. // It is assigned an M and a P (g.m and g.m.p are valid). _Grunning // 2 // _Gsyscall means this goroutine is executing a system call. // It is not executing user code. The stack is owned by this // goroutine. It is not on a run queue. It is assigned an M. _Gsyscall // 3 // _Gwaiting means this goroutine is blocked in the runtime. // It is not executing user code. It is not on a run queue, // but should be recorded somewhere (e.g., a channel wait // queue) so it can be ready()d when necessary. The stack is // not owned *except* that a channel operation may read or // write parts of the stack under the appropriate channel // lock. Otherwise, it is not safe to access the stack after a // goroutine enters _Gwaiting (e.g., it may get moved). _Gwaiting // 4 // _Gmoribund_unused is currently unused, but hardcoded in gdb // scripts. _Gmoribund_unused // 5 // _Gdead means this goroutine is currently unused. It may be // just exited, on a free list, or just being initialized. It // is not executing user code. It may or may not have a stack // allocated. The G and its stack (if any) are owned by the M // that is exiting the G or that obtained the G from the free // list. _Gdead // 6 // _Genqueue_unused is currently unused. _Genqueue_unused // 7 // _Gcopystack means this goroutine's stack is being moved. It // is not executing user code and is not on a run queue. The // stack is owned by the goroutine that put it in _Gcopystack. _Gcopystack // 8 // _Gpreempted means this goroutine stopped itself for a // suspendG preemption. It is like _Gwaiting, but nothing is // yet responsible for ready()ing it. Some suspendG must CAS // the status to _Gwaiting to take responsibility for // ready()ing this G. _Gpreempted // 9 // _Gscan combined with one of the above states other than // _Grunning indicates that GC is scanning the stack. The // goroutine is not executing user code and the stack is owned // by the goroutine that set the _Gscan bit. // // _Gscanrunning is different: it is used to briefly block // state transitions while GC signals the G to scan its own // stack. This is otherwise like _Grunning. // // atomicstatus&~Gscan gives the state the goroutine will // return to when the scan completes. _Gscan = 0x1000 _Gscanrunnable = _Gscan + _Grunnable // 0x1001 _Gscanrunning = _Gscan + _Grunning // 0x1002 _Gscansyscall = _Gscan + _Gsyscall // 0x1003 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 _Gscanpreempted = _Gscan + _Gpreempted // 0x1009 )
const ( // _Pidle means a P is not being used to run user code or the // scheduler. Typically, it's on the idle P list and available // to the scheduler, but it may just be transitioning between // other states. // // The P is owned by the idle list or by whatever is // transitioning its state. Its run queue is empty. _Pidle = iota // _Prunning means a P is owned by an M and is being used to // run user code or the scheduler. Only the M that owns this P // is allowed to change the P's status from _Prunning. The M // may transition the P to _Pidle (if it has no more work to // do), _Psyscall (when entering a syscall), or _Pgcstop (to // halt for the GC). The M may also hand ownership of the P // off directly to another M (e.g., to schedule a locked G). _Prunning // _Psyscall means a P is not running user code. It has // affinity to an M in a syscall but is not owned by it and // may be stolen by another M. This is similar to _Pidle but // uses lightweight transitions and maintains M affinity. // // Leaving _Psyscall must be done with a CAS, either to steal // or retake the P. Note that there's an ABA hazard: even if // an M successfully CASes its original P back to _Prunning // after a syscall, it must understand the P may have been // used by another M in the interim. _Psyscall // _Pgcstop means a P is halted for STW and owned by the M // that stopped the world. The M that stopped the world // continues to use its P, even in _Pgcstop. Transitioning // from _Prunning to _Pgcstop causes an M to release its P and // park. // // The P retains its run queue and startTheWorld will restart // the scheduler on Ps with non-empty run queues. _Pgcstop // _Pdead means a P is no longer used (GOMAXPROCS shrank). We // reuse Ps if GOMAXPROCS increases. A dead P is mostly // stripped of its resources, though a few things remain // (e.g., trace buffers). _Pdead )
Values for the flags field of a sigTabT.
const ( _SigNotify = 1 << iota // let signal.Notify have signal, even if from kernel _SigKill // if signal.Notify doesn't take it, exit quietly _SigThrow // if signal.Notify doesn't take it, exit loudly _SigPanic // if the signal is from the kernel, panic _SigDefault // if the signal isn't explicitly requested, don't monitor it _SigGoExit // cause all runtime procs to exit (only used on Plan 9). _SigSetStack // add SA_ONSTACK to libc handler _SigUnblock // always unblock; see blockableSig _SigIgn // _SIG_DFL action is to ignore the signal )
const ( _TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions. _TraceTrap // the initial PC, SP are from a trap, not a return PC from a call _TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it )
scase.kind values. Known to compiler. Changes here must also be made in src/cmd/compile/internal/gc/select.go's walkselectcases.
const ( caseNil = iota caseRecv caseSend caseDefault )
const ( _SIG_DFL uintptr = 0 _SIG_IGN uintptr = 1 )
const ( sigIdle = iota sigReceiving sigSending )
const ( _MaxSmallSize = 32768 smallSizeDiv = 8 smallSizeMax = 1024 largeSizeDiv = 128 _NumSizeClasses = 67 _PageShift = 13 )
const ( mantbits64 uint = 52 expbits64 uint = 11 bias64 = -1<<(expbits64-1) + 1 nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1<<(mantbits64-1) // quiet NaN, 0 payload inf64 uint64 = (1<<expbits64 - 1) << mantbits64 neg64 uint64 = 1 << (expbits64 + mantbits64) mantbits32 uint = 23 expbits32 uint = 8 bias32 = -1<<(expbits32-1) + 1 nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1<<(mantbits32-1) // quiet NaN, 0 payload inf32 uint32 = (1<<expbits32 - 1) << mantbits32 neg32 uint32 = 1 << (expbits32 + mantbits32) )
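These expressions evaluate to the familiar IEEE 754 bit patterns; a quick standalone check of the 64-bit values:

    package main

    import "fmt"

    func main() {
        const (
            mantbits64 = 52
            expbits64  = 11
        )
        inf64 := uint64(1<<expbits64-1) << mantbits64 // all-ones exponent, zero mantissa
        nan64 := inf64 + 1<<(mantbits64-1)            // quiet NaN bit set, zero payload
        neg64 := uint64(1) << (expbits64 + mantbits64) // sign bit
        fmt.Printf("inf64 = %#x\n", inf64) // 0x7ff0000000000000
        fmt.Printf("nan64 = %#x\n", nan64) // 0x7ff8000000000000
        fmt.Printf("neg64 = %#x\n", neg64) // 0x8000000000000000
    }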
const ( // StackSystem is a number of additional bytes to add // to each stack below the usual guard area for OS-specific // purposes like signal handling. Used on Windows, Plan 9, // and iOS because they do not use a separate stack. _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024 // The minimum size of stack used by Go code _StackMin = 2048 // The minimum stack size to allocate. // The hackery here rounds FixedStack0 up to a power of 2. _FixedStack0 = _StackMin + _StackSystem _FixedStack1 = _FixedStack0 - 1 _FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1) _FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2) _FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4) _FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8) _FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16) _FixedStack = _FixedStack6 + 1 // Functions that need frames bigger than this use an extra // instruction to do the stack split check, to avoid overflow // in case SP - framesize wraps below zero. // This value can be no bigger than the size of the unmapped // space at zero. _StackBig = 4096 // The stack guard is a pointer this many bytes above the // bottom of the stack. _StackGuard = 928*sys.StackGuardMultiplier + _StackSystem // After a stack split check the SP is allowed to be this // many bytes below the stack guard. This saves an instruction // in the checking sequence for tiny frames. _StackSmall = 128 // The maximum number of bytes that a chain of NOSPLIT // functions can use. _StackLimit = _StackGuard - _StackSystem - _StackSmall )
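The _FixedStack chain above is the standard bit-smearing round-up-to-a-power-of-two trick. A standalone sketch, with inputs taken from the table above (linux/amd64 gives 2048; windows/amd64 gives 2048 + 512*8 = 6144):

    package main

    import "fmt"

    // roundUpPow2 mirrors the _FixedStack bit smearing: subtract one, smear the
    // top set bit into every lower position, then add one, which rounds x up to
    // the next power of two.
    func roundUpPow2(x uint64) uint64 {
        x--
        x |= x >> 1
        x |= x >> 2
        x |= x >> 4
        x |= x >> 8
        x |= x >> 16
        x |= x >> 32
        return x + 1
    }

    func main() {
        // linux/amd64: _StackSystem = 0, so _FixedStack0 = 2048, already a power of two.
        fmt.Println(roundUpPow2(2048)) // 2048
        // windows/amd64: _FixedStack0 = 6144, which rounds up to the 8KB
        // FixedStack shown in the NumStackOrders table above.
        fmt.Println(roundUpPow2(6144)) // 8192
    }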
const ( // stackDebug == 0: no logging // == 1: logging of per-stack operations // == 2: logging of per-frame operations // == 3: logging of per-word updates // == 4: logging of per-word reads stackDebug = 0 stackFromSystem = 0 // allocate stacks from system memory instead of the heap stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy stackNoCache = 0 // disable per-P small stack caches // check the BP links during traceback. debugCheckBP = false )
const ( uintptrMask = 1<<(8*sys.PtrSize) - 1 // Goroutine preemption request. // Stored into g->stackguard0 to cause split stack check failure. // Must be greater than any real sp. // 0xfffffade in hex. stackPreempt = uintptrMask & -1314 // Thread is forking. // Stored into g->stackguard0 to cause split stack check failure. // Must be greater than any real sp. stackFork = uintptrMask & -1234 )
const ( maxUint = ^uint(0) maxInt = int(maxUint >> 1) )
PCDATA and FUNCDATA table indexes.
See funcdata.h and ../cmd/internal/objabi/funcdata.go.
const ( _PCDATA_RegMapIndex = 0 // if !go115ReduceLiveness _PCDATA_UnsafePoint = 0 // if go115ReduceLiveness _PCDATA_StackMapIndex = 1 _PCDATA_InlTreeIndex = 2 _FUNCDATA_ArgsPointerMaps = 0 _FUNCDATA_LocalsPointerMaps = 1 _FUNCDATA_RegPointerMaps = 2 // if !go115ReduceLiveness _FUNCDATA_StackObjects = 3 _FUNCDATA_InlTree = 4 _FUNCDATA_OpenCodedDeferInfo = 5 _ArgsSizeUnknown = -0x80000000 )
const ( // PCDATA_UnsafePoint values. _PCDATA_UnsafePointSafe = -1 // Safe for async preemption _PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption // _PCDATA_Restart1(2) apply on a sequence of instructions, within // which if an async preemption happens, we should back off the PC // to the start of the sequence when resume. // We need two so we can distinguish the start/end of the sequence // in case that two sequences are next to each other. _PCDATA_Restart1 = -3 _PCDATA_Restart2 = -4 // Like _PCDATA_RestartAtEntry, but back to function entry if async // preempted. _PCDATA_RestartAtEntry = -5 )
Values for the timer status field.
const ( // Timer has no status set yet. timerNoStatus = iota // Waiting for timer to fire. // The timer is in some P's heap. timerWaiting // Running the timer function. // A timer will only have this status briefly. timerRunning // The timer is deleted and should be removed. // It should not be run, but it is still in some P's heap. timerDeleted // The timer is being removed. // The timer will only have this status briefly. timerRemoving // The timer has been stopped. // It is not in any P's heap. timerRemoved // The timer is being modified. // The timer will only have this status briefly. timerModifying // The timer has been modified to an earlier time. // The new when value is in the nextwhen field. // The timer is in some P's heap, possibly in the wrong place. timerModifiedEarlier // The timer has been modified to the same or a later time. // The new when value is in the nextwhen field. // The timer is in some P's heap, possibly in the wrong place. timerModifiedLater // The timer has been modified and is being moved. // The timer will only have this status briefly. timerMoving )
Event types in the trace, args are given in square brackets.
const ( traceEvNone = 0 // unused traceEvBatch = 1 // start of per-P batch of events [pid, timestamp] traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)] traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}] traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id] traceEvProcStart = 5 // start of P [timestamp, thread id] traceEvProcStop = 6 // stop of P [timestamp] traceEvGCStart = 7 // GC start [timestamp, seq, stack id] traceEvGCDone = 8 // GC done [timestamp] traceEvGCSTWStart = 9 // GC STW start [timestamp, kind] traceEvGCSTWDone = 10 // GC STW done [timestamp] traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id] traceEvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed] traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id] traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq] traceEvGoEnd = 15 // goroutine ends [timestamp] traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack] traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack] traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack] traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack] traceEvGoBlock = 20 // goroutine blocks [timestamp, stack] traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack] traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack] traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack] traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack] traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack] traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack] traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack] traceEvGoSysCall = 28 // syscall enter [timestamp, stack] traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp] traceEvGoSysBlock = 30 // syscall blocks [timestamp] traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id] traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id] traceEvHeapAlloc = 33 // memstats.heap_live change [timestamp, heap_alloc] traceEvNextGC = 34 // memstats.next_gc change [timestamp, next_gc] traceEvTimerGoroutine = 35 // not currently used; previously denoted timer goroutine [timer goroutine id] traceEvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp] traceEvString = 37 // string dictionary entry [ID, length, string] traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id] traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack] traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp] traceEvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id] traceEvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack] traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack] traceEvGCMarkAssistDone = 44 // GC mark assist done [timestamp] traceEvUserTaskCreate = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string] traceEvUserTaskEnd 
= 46 // end of a task [timestamp, internal task id, stack] traceEvUserRegion = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string] traceEvUserLog = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string] traceEvCount = 49 )
const ( // Timestamps in trace are cputicks/traceTickDiv. // This makes absolute values of timestamp diffs smaller, // and so they are encoded in fewer bytes. // 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine). // The suggested increment frequency for PowerPC's time base register is // 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64 // and ppc64le. // Tracing won't work reliably for architectures where cputicks is emulated // by nanotime, so the value doesn't matter for those architectures. traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64) // Maximum number of PCs in a single stack trace. // Since events contain only stack id rather than whole stack trace, // we can allow quite large values here. traceStackSize = 128 // Identifier of a fake P that is used when we trace without a real P. traceGlobProc = -1 // Maximum number of bytes to encode uint64 in base-128. traceBytesPerNumber = 10 // Shift of the number of arguments in the first event byte. traceArgCountShift = 6 // Flag passed to traceGoPark to denote that the previous wakeup of this // goroutine was futile. For example, a goroutine was unblocked on a mutex, // but another goroutine got ahead and acquired the mutex before the first // goroutine is scheduled, so the first goroutine has to block again. // Such wakeups happen on buffered channels and sync.Mutex, // but are generally not interesting to the end user. traceFutileWakeup byte = 128 )
const ( kindBool = 1 + iota kindInt kindInt8 kindInt16 kindInt32 kindInt64 kindUint kindUint8 kindUint16 kindUint32 kindUint64 kindUintptr kindFloat32 kindFloat64 kindComplex64 kindComplex128 kindArray kindChan kindFunc kindInterface kindMap kindPtr kindSlice kindString kindStruct kindUnsafePointer kindDirectIface = 1 << 5 kindGCProg = 1 << 6 kindMask = (1 << 5) - 1 )
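A small sketch of how a kind byte built from these constants is typically taken apart: mask off the flag bits to get the base kind, and test the flags separately. The example value (22, i.e. kindPtr per the iota above, with kindDirectIface set) is chosen purely for illustration:

    package main

    import "fmt"

    const (
        kindDirectIface = 1 << 5
        kindGCProg      = 1 << 6
        kindMask        = (1 << 5) - 1
    )

    func main() {
        // Hypothetical kind byte: kindPtr (22) with the direct-interface flag set.
        k := uint8(22 | kindDirectIface)
        fmt.Println(k & kindMask)           // 22: the base kind
        fmt.Println(k&kindDirectIface != 0) // true
        fmt.Println(k&kindGCProg != 0)      // false
    }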
Numbers fundamental to the encoding.
const ( runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" runeSelf = 0x80 // characters below runeSelf are represented as themselves in a single byte. maxRune = '\U0010FFFF' // Maximum valid Unicode code point. )
Code points in the surrogate range are not valid for UTF-8.
const ( surrogateMin = 0xD800 surrogateMax = 0xDFFF )
const ( t1 = 0x00 // 0000 0000 tx = 0x80 // 1000 0000 t2 = 0xC0 // 1100 0000 t3 = 0xE0 // 1110 0000 t4 = 0xF0 // 1111 0000 t5 = 0xF8 // 1111 1000 maskx = 0x3F // 0011 1111 mask2 = 0x1F // 0001 1111 mask3 = 0x0F // 0000 1111 mask4 = 0x07 // 0000 0111 rune1Max = 1<<7 - 1 rune2Max = 1<<11 - 1 rune3Max = 1<<16 - 1 // The default lowest and highest continuation byte. locb = 0x80 // 1000 0000 hicb = 0xBF // 1011 1111 )
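A hedged sketch of how the t2/tx/maskx constants combine when encoding a rune that needs two bytes (0x80 <= r <= rune2Max); this is an illustration of the bit layout, not the runtime's encoder:

    package main

    import "fmt"

    // encode2 builds the two UTF-8 bytes for a rune in the 2-byte range.
    func encode2(r rune) []byte {
        const (
            tx    = 0x80 // 1000 0000: continuation byte tag
            t2    = 0xC0 // 1100 0000: 2-byte leading byte tag
            maskx = 0x3F // 0011 1111: keep the low 6 bits
        )
        return []byte{
            t2 | byte(r>>6),    // leading byte carries the high 5 bits
            tx | byte(r)&maskx, // continuation byte carries the low 6 bits
        }
    }

    func main() {
        fmt.Printf("% x\n", encode2(0x00E9)) // c3 a9, the UTF-8 encoding of 'é'
    }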
const ( _AT_SYSINFO_EHDR = 33 _PT_LOAD = 1 /* Loadable program segment */ _PT_DYNAMIC = 2 /* Dynamic linking information */ _DT_NULL = 0 /* Marks end of dynamic section */ _DT_HASH = 4 /* Dynamic symbol hash table */ _DT_STRTAB = 5 /* Address of string table */ _DT_SYMTAB = 6 /* Address of symbol table */ _DT_GNU_HASH = 0x6ffffef5 /* GNU-style dynamic symbol hash table */ _DT_VERSYM = 0x6ffffff0 _DT_VERDEF = 0x6ffffffc _VER_FLG_BASE = 0x1 /* Version definition of file itself */ _SHN_UNDEF = 0 /* Undefined section */ _SHT_DYNSYM = 11 /* Dynamic linker symbol table */ _STT_FUNC = 2 /* Symbol is a code object */ _STT_NOTYPE = 0 /* Symbol type is not specified */ _STB_GLOBAL = 1 /* Global symbol */ _STB_WEAK = 2 /* Weak symbol */ _EI_NIDENT = 16 // Maximum indices for the array types used when traversing the vDSO ELF structures. // Computed from architecture-specific max provided by vdso_linux_*.go vdsoSymTabSize = vdsoArrayMax / unsafe.Sizeof(elfSym{}) vdsoDynSize = vdsoArrayMax / unsafe.Sizeof(elfDyn{}) vdsoSymStringsSize = vdsoArrayMax // byte vdsoVerSymSize = vdsoArrayMax / 2 // uint16 vdsoHashSize = vdsoArrayMax / 4 // uint32 // vdsoBloomSizeScale is a scaling factor for gnuhash tables which are uint32 indexed, // but contain uintptrs vdsoBloomSizeScale = unsafe.Sizeof(uintptr(0)) / 4 // uint32 )
Compiler is the name of the compiler toolchain that built the running binary. Known toolchains are:
    gc      Also known as cmd/compile.
    gccgo   The gccgo front end, part of the GCC compiler suite.
const Compiler = "gc"
GOARCH is the running program's architecture target: one of 386, amd64, arm, s390x, and so on.
const GOARCH string = sys.GOARCH
GOOS is the running program's operating system target: one of darwin, freebsd, linux, and so on. To view possible combinations of GOOS and GOARCH, run "go tool dist list".
const GOOS string = sys.GOOS
const ( // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. _GoidCacheBatch = 16 )
The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100
const __NEW_UTS_LEN = 64
buffer of pending write data
const ( bufSize = 4096 )
const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
const cgoResultFail = "cgo result has Go pointer"
const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"
debugCachedWork enables extra checks for debugging premature mark termination.
For debugging issue #27993.
const debugCachedWork = false
debugLogBytes is the size of each per-M ring buffer. This is allocated off-heap to avoid blowing up the M and hence the GC'd heap size.
const debugLogBytes = 16 << 10
debugLogStringLimit is the maximum number of bytes in a string. Above this, the string will be truncated with "..(n more bytes).."
const debugLogStringLimit = debugLogBytes / 8
const debugPcln = false
const debugSelect = false
defaultHeapMinimum is the value of heapminimum for GOGC==100.
const defaultHeapMinimum = 4 << 20
const dlogEnabled = false
const fastlogNumBits = 5
forcePreemptNS is the time slice given to a G before it is preempted.
const forcePreemptNS = 10 * 1000 * 1000 // 10ms
freezeStopWait is a large value that freezetheworld sets sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff
gcAssistTimeSlack is the nanoseconds of mutator assist time that can accumulate on a P before updating gcController.assistTime.
const gcAssistTimeSlack = 5000
gcBackgroundUtilization is the fixed CPU utilization for background marking. It must be <= gcGoalUtilization. The difference between gcGoalUtilization and gcBackgroundUtilization will be made up by mark assists. The scheduler will aim to use within 50% of this goal.
Setting this to < gcGoalUtilization avoids saturating the trigger feedback controller when there are no assists, which allows it to better control CPU and heap growth. However, the larger the gap, the more mutator assists are expected to happen, which impact mutator latency.
const gcBackgroundUtilization = 0.25
const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
gcCreditSlack is the amount of scan work credit that can accumulate locally before updating gcController.scanWork and, optionally, gcController.bgScanCredit. Lower values give a more accurate assist ratio and make it more likely that assists will successfully steal background credit. Higher values reduce memory contention.
const gcCreditSlack = 2000
gcGoalUtilization is the goal CPU utilization for marking as a fraction of GOMAXPROCS.
const gcGoalUtilization = 0.30
gcOverAssistWork determines how many extra units of scan work a GC assist does when an assist happens. This amortizes the cost of an assist by pre-paying for this many bytes of future allocations.
const gcOverAssistWork = 64 << 10
Keep in sync with cmd/compile/internal/gc/plive.go:go115ReduceLiveness.
const go115ReduceLiveness = true
const go115RestartSeq = go115ReduceLiveness && true // enable restartable sequences
const hashRandomBytes = sys.PtrSize / 4 * 64
const itabInitSize = 512
const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
const maxCPUProfStack = 64
maxWhen is the maximum value for timer's when field.
const maxWhen = 1<<63 - 1
const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize
const minfunc = 16 // minimum function size
const msanenabled = false
osRelaxMinNS is the number of nanoseconds of idleness to tolerate without performing an osRelax. Since osRelax may reduce the precision of timers, this should be sufficiently larger than the relaxed timer precision to keep the timer error acceptable.
const osRelaxMinNS = 0
const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table
persistentChunkSize is the number of bytes we allocate when we grow a persistentAlloc.
const persistentChunkSize = 256 << 10
const pollBlockSize = 4 * 1024
const preemptMSupported = true
const raceenabled = false
To shake out latent assumptions about scheduling order, we introduce some randomness into scheduling decisions when running with the race detector. The need for this was made obvious by changing the (deterministic) scheduling order in Go 1.5 and breaking many poorly-written tests. With the randomness here, as long as the tests pass consistently with -race, they shouldn't have latent scheduling assumptions.
const randomizeScheduler = raceenabled
const rwmutexMaxReaders = 1 << 30
Prime to not correlate with any user patterns.
const semTabSize = 251
sigPreempt is the signal used for non-cooperative preemption.
There's no good way to choose this signal, but there are some heuristics:
1. It should be a signal that's passed-through by debuggers by default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO, SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
2. It shouldn't be used internally by libc in mixed Go/C binaries because libc may assume it's the only thing that can handle these signals. For example SIGCANCEL or SIGSETXID.
3. It should be a signal that can happen spuriously without consequences. For example, SIGALRM is a bad choice because the signal handler can't tell if it was caused by the real process alarm or not (arguably this means the signal is broken, but I digress). SIGUSR1 and SIGUSR2 are also bad because those are often used in meaningful ways by applications.
4. We need to deal with platforms without real-time signals (like macOS), so those are out.
We use SIGURG because it meets all of these criteria, is extremely unlikely to be used by an application for its "real" meaning (both because out-of-band data is basically unused and because SIGURG doesn't report which socket has the condition, making it pretty useless), and even if it is, the application has to be ready for spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more likely to be used for real.
const sigPreempt = _SIGURG
const sizeofSkipFunction = 256
const stackTraceDebug = false
testSmallBuf forces a small write barrier buffer to stress write barrier flushing.
const testSmallBuf = false
The constant is known to the compiler. There is no fundamental theory behind this number.
const tmpStringBufSize = 32
const usesLR = sys.MinFrameSize > 0
const ( // vdsoArrayMax is the byte-size of a maximally sized array on this architecture. // See cmd/compile/internal/amd64/galign.go arch.MAXWIDTH initialization. vdsoArrayMax = 1<<50 - 1 )
verifyTimers can be set to true to add debugging checks that the timer heaps are valid.
const verifyTimers = false
Variables
var ( _cgo_init unsafe.Pointer _cgo_thread_start unsafe.Pointer _cgo_sys_thread_create unsafe.Pointer _cgo_notify_runtime_init_done unsafe.Pointer _cgo_callers unsafe.Pointer _cgo_set_context_function unsafe.Pointer _cgo_yield unsafe.Pointer )
var ( // Set in runtime.cpuinit. // TODO: deprecate these; use internal/cpu directly. x86HasPOPCNT bool x86HasSSE41 bool x86HasFMA bool armHasVFPv4 bool arm64HasATOMICS bool )
var ( itabLock mutex // lock for accessing itab table itabTable = &itabTableInit // pointer to current table itabTableInit = itabTableType{size: itabInitSize} // starter table )
var ( uint16Eface interface{} = uint16InterfacePtr(0) uint32Eface interface{} = uint32InterfacePtr(0) uint64Eface interface{} = uint64InterfacePtr(0) stringEface interface{} = stringInterfacePtr("") sliceEface interface{} = sliceInterfacePtr(nil) uint16Type *_type = efaceOf(&uint16Eface)._type uint32Type *_type = efaceOf(&uint32Eface)._type uint64Type *_type = efaceOf(&uint64Eface)._type stringType *_type = efaceOf(&stringEface)._type sliceType *_type = efaceOf(&sliceEface)._type )
physHugePageSize is the size in bytes of the OS's default physical huge page size whose allocation is opaque to the application. It is assumed and verified to be a power of two.
If set, this must be set by the OS init code (typically in osinit) before mallocinit. However, setting it at all is optional, and leaving the default value is always safe (though potentially less efficient).
Since physHugePageSize is always assumed to be a power of two, physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift. The purpose of physHugePageShift is to avoid doing divisions in performance critical functions.
var ( physHugePageSize uintptr physHugePageShift uint )
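Given the invariant physHugePageSize == 1 << physHugePageShift, the shift can be derived from the size once the OS init code knows it. A standalone sketch, assuming a 2MB huge page size (a common Linux default):

    package main

    import "fmt"

    func main() {
        // Hypothetical 2MB huge page size reported by the OS.
        size := uintptr(2 << 20)
        shift := uint(0)
        for uintptr(1)<<shift != size {
            shift++
        }
        fmt.Println(shift) // 21, so size == 1 << shift holds
    }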
var ( fingCreate uint32 fingRunning bool )
var ( mbuckets *bucket // memory profile buckets bbuckets *bucket // blocking profile buckets xbuckets *bucket // mutex profile buckets buckhash *[179999]*bucket bucketmem uintptr mProf struct { // cycle is the global heap profile cycle. This wraps // at mProfCycleWrap. cycle uint32 // flushed indicates that future[cycle] in all buckets // has been flushed to the active profile. flushed bool } )
var ( // minOffAddr is the minimum address in the offset space, and // it corresponds to the virtual address arenaBaseOffset. minOffAddr = offAddr{arenaBaseOffset} // maxOffAddr is the maximum address in the offset address // space. It corresponds to the highest virtual address representable // by the page alloc chunk and heap arena maps. maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask} )
var ( netpollInitLock mutex netpollInited uint32 pollcache pollCache netpollWaiters uint32 )
var ( epfd int32 = -1 // epoll descriptor netpollBreakRd, netpollBreakWr uintptr // for netpollBreak netpollWakeSig uint32 // used to avoid duplicate calls of netpollBreak )
var ( // printBacklog is a circular buffer of messages written with the builtin // print* functions, for use in postmortem analysis of core dumps. printBacklog [512]byte printBacklogIndex int )
var ( m0 m g0 g mcache0 *mcache raceprocctx0 uintptr )
var ( argc int32 argv **byte )
var ( allglen uintptr allm *m allp []*p // len(allp) == gomaxprocs; may change at safe points, otherwise immutable allpLock mutex // Protects P-less reads of allp and all writes gomaxprocs int32 ncpu int32 forcegc forcegcstate sched schedt newprocs int32 // Information about what cpu features are available. // Packages outside the runtime should not use these // as they are not an external api. // Set on startup in asm_{386,amd64}.s processorVersionInfo uint32 isIntel bool lfenceBeforeRdtsc bool goarm uint8 // set by cmd/link on arm systems framepointer_enabled bool // set by cmd/link )
Set by the linker so the runtime can determine the buildmode.
var ( islibrary bool // -buildmode=c-shared isarchive bool // -buildmode=c-archive )
var ( chansendpc = funcPC(chansend) chanrecvpc = funcPC(chanrecv) )
channels for synchronizing signal mask updates with the signal mask thread
var ( disableSigChan chan uint32 enableSigChan chan uint32 maskUpdatedChan chan struct{} )
initialize with vsyscall fallbacks
var ( vdsoGettimeofdaySym uintptr = 0xffffffffff600000 vdsoClockgettimeSym uintptr = 0 )
MemProfileRate controls the fraction of memory allocations that are recorded and reported in the memory profile. The profiler aims to sample an average of one allocation per MemProfileRate bytes allocated.
To include every allocated block in the profile, set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0.
The tools that process the memory profiles assume that the profile rate is constant across the lifetime of the program and equal to the current value. Programs that change the memory profiling rate should do so just once, as early as possible in the execution of the program (for example, at the beginning of main).
var MemProfileRate int = 512 * 1024
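As a sketch of the guidance above (not part of the runtime API docs), a program that wants per-allocation accuracy might set the rate once at the top of main and write the heap profile on the way out; the output path "mem.pprof" is a placeholder:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Set the sampling rate once, before any significant allocation.
	runtime.MemProfileRate = 1 // record every allocated block

	// ... the program's real work goes here ...

	// Write the heap profile on exit.
	f, err := os.Create("mem.pprof") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.WriteHeapProfile(f); err != nil {
		panic(err)
	}
}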
Make the compiler check that heapBits.arena is large enough to hold the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
_cgo_mmap is filled in by runtime/cgo when it is linked into the program, so it is only non-nil when using cgo.
var _cgo_mmap unsafe.Pointer
_cgo_munmap is filled in by runtime/cgo when it is linked into the program, so it is only non-nil when using cgo.
var _cgo_munmap unsafe.Pointer
var _cgo_setenv unsafe.Pointer // pointer to C function
_cgo_sigaction is filled in by runtime/cgo when it is linked into the program, so it is only non-nil when using cgo.
var _cgo_sigaction unsafe.Pointer
var _cgo_unsetenv unsafe.Pointer // pointer to C function
var addrspace_vec [1]byte
var adviseUnused = uint32(_MADV_FREE)
used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte
var argslice []string
asyncPreemptStack is the bytes of stack space required to inject an asyncPreempt call.
var asyncPreemptStack = ^uintptr(0)
var badginsignalMsg = "fatal: bad g in signal handler\n"
var badmorestackg0Msg = "fatal: morestack on g0\n"
var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
var badsystemstackMsg = "fatal: systemstack called from unexpected goroutine"
var blockprofilerate uint64 // in CPU ticks
boundsErrorFmts provide error text for various out-of-bounds panics. Note: if you change these strings, you should adjust the size of the buffer in boundsError.Error below as well.
var boundsErrorFmts = [...]string{ boundsIndex: "index out of range [%x] with length %y", boundsSliceAlen: "slice bounds out of range [:%x] with length %y", boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y", boundsSliceB: "slice bounds out of range [%x:%y]", boundsSlice3Alen: "slice bounds out of range [::%x] with length %y", boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y", boundsSlice3B: "slice bounds out of range [:%x:%y]", boundsSlice3C: "slice bounds out of range [%x:%y:]", }
boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
var boundsNegErrorFmts = [...]string{ boundsIndex: "index out of range [%x]", boundsSliceAlen: "slice bounds out of range [:%x]", boundsSliceAcap: "slice bounds out of range [:%x]", boundsSliceB: "slice bounds out of range [%x:]", boundsSlice3Alen: "slice bounds out of range [::%x]", boundsSlice3Acap: "slice bounds out of range [::%x]", boundsSlice3B: "slice bounds out of range [:%x:]", boundsSlice3C: "slice bounds out of range [%x::]", }
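For illustration only, a tiny program that trips the first format above; the helper exists just to keep the index from being a constant:

package main

func badIndex() int { return 5 }

func main() {
	s := make([]int, 3)
	_ = s[badIndex()] // panics: runtime error: index out of range [5] with length 3
}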
var buf [bufSize]byte
var buildVersion = sys.TheVersion
cgoAlwaysFalse is a boolean value that is always false. The cgo-generated code says if cgoAlwaysFalse { cgoUse(p) }. The compiler cannot see that cgoAlwaysFalse is always false, so it emits the test and keeps the call, giving the desired escape analysis result. The test is cheaper than the call.
var cgoAlwaysFalse bool
var cgoContext unsafe.Pointer
cgoHasExtraM is set on startup when an extra M is created for cgo. The extra M must be created before any C/C++ code calls cgocallback.
var cgoHasExtraM bool
var cgoSymbolizer unsafe.Pointer
When running with cgo, we call _cgo_thread_start to start threads for us so that we can play nicely with foreign code.
var cgoThreadStart unsafe.Pointer
var cgoTraceback unsafe.Pointer
var cgo_yield = &_cgo_yield
var class_to_allocnpages = [_NumSizeClasses]uint8{0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 2, 3, 1, 3, 2, 3, 4, 5, 6, 1, 7, 6, 5, 4, 3, 5, 7, 2, 9, 7, 5, 8, 3, 10, 7, 4}
var class_to_divmagic = [_NumSizeClasses]divMagic{{0, 0, 0, 0}, {3, 0, 1, 65528}, {4, 0, 1, 65520}, {5, 0, 1, 65504}, {4, 11, 683, 0}, {6, 0, 1, 65472}, {4, 10, 205, 0}, {5, 9, 171, 0}, {4, 11, 293, 0}, {7, 0, 1, 65408}, {4, 13, 911, 0}, {5, 10, 205, 0}, {4, 12, 373, 0}, {6, 9, 171, 0}, {4, 13, 631, 0}, {5, 11, 293, 0}, {4, 13, 547, 0}, {8, 0, 1, 65280}, {5, 9, 57, 0}, {6, 9, 103, 0}, {5, 12, 373, 0}, {7, 7, 43, 0}, {5, 10, 79, 0}, {6, 10, 147, 0}, {5, 11, 137, 0}, {9, 0, 1, 65024}, {6, 9, 57, 0}, {7, 9, 103, 0}, {6, 11, 187, 0}, {8, 7, 43, 0}, {7, 8, 37, 0}, {10, 0, 1, 64512}, {7, 9, 57, 0}, {8, 6, 13, 0}, {7, 11, 187, 0}, {9, 5, 11, 0}, {8, 8, 37, 0}, {11, 0, 1, 63488}, {8, 9, 57, 0}, {7, 10, 49, 0}, {10, 5, 11, 0}, {7, 10, 41, 0}, {7, 9, 19, 0}, {12, 0, 1, 61440}, {8, 9, 27, 0}, {8, 10, 49, 0}, {11, 5, 11, 0}, {7, 13, 161, 0}, {7, 13, 155, 0}, {8, 9, 19, 0}, {13, 0, 1, 57344}, {8, 12, 111, 0}, {9, 9, 27, 0}, {11, 6, 13, 0}, {7, 14, 193, 0}, {12, 3, 3, 0}, {8, 13, 155, 0}, {11, 8, 37, 0}, {14, 0, 1, 49152}, {11, 8, 29, 0}, {7, 13, 55, 0}, {12, 5, 7, 0}, {8, 14, 193, 0}, {13, 3, 3, 0}, {7, 14, 77, 0}, {12, 7, 19, 0}, {15, 0, 1, 32768}}
var class_to_size = [_NumSizeClasses]uint16{0, 8, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768}
consec8tab is a table containing the maximum number of consecutive zero bits for any uint8 value.
The table is generated by calling consec8(i) for each possible uint8 value, which is defined as:
// consec8 counts the maximum number of consecutive 0 bits
// in a uint8.
func consec8(n uint8) int {
	n = ^n
	i := 0
	for n != 0 {
		n &= (n << 1)
		i++
	}
	return i
}
var consec8tab = [256]uint{ 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 4, 3, 3, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 4, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 6, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 4, 3, 2, 2, 2, 1, 1, 1, 3, 2, 1, 1, 2, 1, 1, 1, 5, 4, 3, 3, 2, 2, 2, 2, 3, 2, 1, 1, 2, 1, 1, 1, 4, 3, 2, 2, 2, 1, 1, 1, 3, 2, 1, 1, 2, 1, 1, 1, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 5, 4, 3, 3, 2, 2, 2, 2, 3, 2, 1, 1, 2, 1, 1, 1, 4, 3, 2, 2, 2, 1, 1, 1, 3, 2, 1, 1, 2, 1, 1, 1, 6, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 4, 3, 2, 2, 2, 1, 1, 1, 3, 2, 1, 1, 2, 1, 1, 1, 5, 4, 3, 3, 2, 2, 2, 2, 3, 2, 1, 1, 2, 1, 1, 1, 4, 3, 2, 2, 2, 1, 1, 1, 3, 2, 1, 1, 2, 1, 1, 0, }
crashing is the number of m's we have waited for when implementing GOTRACEBACK=crash when a signal is received.
var crashing int32
var dbgvars = []dbgVar{
{"allocfreetrace", &debug.allocfreetrace},
{"clobberfree", &debug.clobberfree},
{"cgocheck", &debug.cgocheck},
{"efence", &debug.efence},
{"gccheckmark", &debug.gccheckmark},
{"gcpacertrace", &debug.gcpacertrace},
{"gcshrinkstackoff", &debug.gcshrinkstackoff},
{"gcstoptheworld", &debug.gcstoptheworld},
{"gctrace", &debug.gctrace},
{"invalidptr", &debug.invalidptr},
{"madvdontneed", &debug.madvdontneed},
{"sbrk", &debug.sbrk},
{"scavenge", &debug.scavenge},
{"scavtrace", &debug.scavtrace},
{"scheddetail", &debug.scheddetail},
{"schedtrace", &debug.schedtrace},
{"tracebackancestors", &debug.tracebackancestors},
{"asyncpreemptoff", &debug.asyncpreemptoff},
}
Holds variables parsed from GODEBUG env var, except for "memprofilerate" since there is an existing int var for that value, which may already have an initial value.
var debug struct { allocfreetrace int32 cgocheck int32 clobberfree int32 efence int32 gccheckmark int32 gcpacertrace int32 gcshrinkstackoff int32 gcstoptheworld int32 gctrace int32 invalidptr int32 madvdontneed int32 // for Linux; issue 28466 sbrk int32 scavenge int32 scavtrace int32 scheddetail int32 schedtrace int32 tracebackancestors int32 asyncpreemptoff int32 }
var debugPtrmask struct { lock mutex data *byte }
var didothers bool
var divideError = error(errorString("integer divide by zero"))
var dumpfd uintptr // fd to write the dump to.
var dumphdr = []byte("go1.7 heap dump\n")
var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
var envs []string
var extraMCount uint32 // Protected by lockextra
var extraMWaiters uint32
var extram uintptr
var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
faketime is the simulated time in nanoseconds since 1970 for the playground.
Zero means not to use faketime.
var faketime int64
var fastlog2Table = [1<<fastlogNumBits + 1]float64{ 0, 0.0443941193584535, 0.08746284125033943, 0.12928301694496647, 0.16992500144231248, 0.2094533656289499, 0.24792751344358555, 0.28540221886224837, 0.3219280948873623, 0.3575520046180837, 0.39231742277876036, 0.4262647547020979, 0.4594316186372973, 0.4918530963296748, 0.5235619560570128, 0.5545888516776374, 0.5849625007211563, 0.6147098441152082, 0.6438561897747247, 0.6724253419714956, 0.7004397181410922, 0.7279204545631992, 0.7548875021634686, 0.7813597135246596, 0.8073549220576042, 0.8328900141647417, 0.8579809951275721, 0.8826430493618412, 0.9068905956085185, 0.9307373375628862, 0.9541963103868752, 0.9772799234999164, 1, }
var fastrandseed uintptr
var finalizer1 = [...]byte{ 1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7, 1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7, 1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7, 1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7, 0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7, }
var fingwait bool
var fingwake bool
var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
var floatError = error(errorString("floating point error"))
forcegcperiod is the maximum time in nanoseconds between garbage collections. If we go this long without a garbage collection, one is forced to run.
This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9
Bit vector of free marks. Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool
freezing is set to non-zero if the runtime is trying to freeze the world.
var freezing uint32
Stores the signal handlers registered before Go installed its own. These signal handlers will be invoked in cases where Go doesn't want to handle a particular signal (e.g., signal occurred on a non-Go thread). See sigfwdgo for more information on when the signals are forwarded.
This is read by the signal handler; accesses should use atomic.Loaduintptr and atomic.Storeuintptr.
var fwdSig [_NSIG]uintptr
var gStatusStrings = [...]string{ _Gidle: "idle", _Grunnable: "runnable", _Grunning: "running", _Gsyscall: "syscall", _Gwaiting: "waiting", _Gdead: "dead", _Gcopystack: "copystack", _Gpreempted: "preempted", }
var gcBitsArenas struct { lock mutex free *gcBitsArena next *gcBitsArena // Read atomically. Write atomically under lock. current *gcBitsArena previous *gcBitsArena }
gcBlackenEnabled is 1 if mutator assists and background mark workers are allowed to blacken objects. This must only be set when gcphase == _GCmark.
var gcBlackenEnabled uint32
gcMarkDoneFlushed counts the number of P's with flushed work.
Ideally this would be a captured local in gcMarkDone, but forEachP escapes its callback closure, so it can't capture anything.
This is protected by markDoneSema.
var gcMarkDoneFlushed uint32
gcMarkWorkerModeStrings are the strings labels of gcMarkWorkerModes to use in execution traces.
var gcMarkWorkerModeStrings = [...]string{ "GC (dedicated)", "GC (fractional)", "GC (idle)", }
gcWorkPauseGen is for debugging the mark completion algorithm. gcWork put operations spin while gcWork.pauseGen == gcWorkPauseGen. Only used if debugCachedWork is true.
For debugging issue #27993.
var gcWorkPauseGen uint32 = 1
Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32
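The exported way to adjust the same setting while the program runs is runtime/debug.SetGCPercent; a minimal sketch (the chosen value of 50 is arbitrary):

package main

import "runtime/debug"

func main() {
	// A lower percentage triggers collection more often; -1 disables GC,
	// like GOGC=off.
	old := debug.SetGCPercent(50)
	defer debug.SetGCPercent(old)

	// ... allocation-heavy work ...
}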
Garbage collector phase. Indicates to the write barrier and synchronization code which tasks to perform.
var gcphase uint32
Holding gcsema grants the M the right to block a GC, and blocks until the current GC is done. In particular, it prevents gomaxprocs from changing concurrently.
TODO(mknyszek): Once gomaxprocs and the execution tracer can handle being changed/enabled during a GC, remove this.
var gcsema uint32 = 1
var globalAlloc struct {
mutex
persistentAlloc
}
gsignalInitQuirk, if non-nil, is called for every allocated gsignal G.
TODO(austin): Remove this after Go 1.15 when we remove the mlockGsignal workaround.
var gsignalInitQuirk func(gsignal *g)
handlingSig is indexed by signal number and is non-zero if we are currently handling the signal. Or, to put it another way, whether the signal handler is currently set to the Go signal handler or not. This is uint32 rather than bool so that we can use atomic instructions.
var handlingSig [_NSIG]uint32
exported value for testing
var hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
used in hash{32,64}.go to seed the hash function
var hashkey [4]uintptr
heapminimum is the minimum heap size at which to trigger GC. For small heaps, this overrides the usual GOGC*live set rule.
When there is a very small live set but a lot of allocation, simply collecting when the heap reaches GOGC*live results in many GC cycles and high total per-GC overhead. This minimum amortizes this per-GC overhead while keeping the heap reasonably small.
During initialization this is set to 4MB*GOGC/100. In the case of GOGC==0, this will set heapminimum to 0, resulting in constant collection even when the heap size is small, which is useful for debugging.
var heapminimum uint64 = defaultHeapMinimum
inForkedChild is true while manipulating signals in the child process. This is used to avoid calling libc functions in case we are using vfork.
var inForkedChild bool
var inf = float64frombits(0x7FF0000000000000)
iscgo is set to true by the runtime/cgo package
var iscgo bool
var labelSync uintptr
levelBits is the number of bits in the radix for a given level in the super summary structure.
The sum of all the entries of levelBits should equal heapAddrBits.
var levelBits = [summaryLevels]uint{ summaryL0Bits, summaryLevelBits, summaryLevelBits, summaryLevelBits, summaryLevelBits, }
levelLogPages is log2 the maximum number of runtime pages in the address space a summary in the given level represents.
The leaf level always represents exactly log2 of 1 chunk's worth of pages.
var levelLogPages = [summaryLevels]uint{ logPallocChunkPages + 4*summaryLevelBits, logPallocChunkPages + 3*summaryLevelBits, logPallocChunkPages + 2*summaryLevelBits, logPallocChunkPages + 1*summaryLevelBits, logPallocChunkPages, }
levelShift is the number of bits to shift to acquire the radix for a given level in the super summary structure.
With levelShift, one can compute the index of the summary at level l related to a pointer p by doing:
p >> levelShift[l]
var levelShift = [summaryLevels]uint{ heapAddrBits - summaryL0Bits, heapAddrBits - summaryL0Bits - 1*summaryLevelBits, heapAddrBits - summaryL0Bits - 2*summaryLevelBits, heapAddrBits - summaryL0Bits - 3*summaryLevelBits, heapAddrBits - summaryL0Bits - 4*summaryLevelBits, }
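A purely illustrative sketch of that indexing rule; the shift values and the example address below are made up and do not correspond to any real architecture's summary layout:

package main

import "fmt"

func main() {
	// Hypothetical shifts for a four-level radix; the real table is derived
	// from heapAddrBits, summaryL0Bits, and summaryLevelBits per platform.
	levelShift := [4]uint{34, 26, 18, 10}
	p := uint64(0x7f3a12345678) // an arbitrary example address
	for l, s := range levelShift {
		fmt.Printf("level %d summary index: %#x\n", l, p>>s)
	}
}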
lockNames gives the names associated with each of the above ranks
var lockNames = []string{ lockRankDummy: "", lockRankSysmon: "sysmon", lockRankScavenge: "scavenge", lockRankForcegc: "forcegc", lockRankSweepWaiters: "sweepWaiters", lockRankAssistQueue: "assistQueue", lockRankCpuprof: "cpuprof", lockRankSweep: "sweep", lockRankSched: "sched", lockRankDeadlock: "deadlock", lockRankPanic: "panic", lockRankAllg: "allg", lockRankAllp: "allp", lockRankPollDesc: "pollDesc", lockRankTimers: "timers", lockRankItab: "itab", lockRankReflectOffs: "reflectOffs", lockRankHchan: "hchan", lockRankFin: "fin", lockRankNotifyList: "notifyList", lockRankTraceBuf: "traceBuf", lockRankTraceStrings: "traceStrings", lockRankMspanSpecial: "mspanSpecial", lockRankProf: "prof", lockRankGcBitsArenas: "gcBitsArenas", lockRankRoot: "root", lockRankTrace: "trace", lockRankTraceStackTab: "traceStackTab", lockRankNetpollInit: "netpollInit", lockRankRwmutexW: "rwmutexW", lockRankRwmutexR: "rwmutexR", lockRankMcentral: "mcentral", lockRankSpine: "spine", lockRankSpanSetSpine: "spanSetSpine", lockRankGscan: "gscan", lockRankStackpool: "stackpool", lockRankStackLarge: "stackLarge", lockRankDefer: "defer", lockRankSudog: "sudog", lockRankWbufSpans: "wbufSpans", lockRankMheap: "mheap", lockRankMheapSpecial: "mheapSpecial", lockRankGlobalAlloc: "globalAlloc.mutex", lockRankGFree: "gFree", lockRankHchanLeaf: "hchanLeaf", lockRankNewmHandoff: "newmHandoff.lock", lockRankDebugPtrmask: "debugPtrmask.lock", lockRankFaketimeState: "faketimeState.lock", lockRankTicks: "ticks.lock", lockRankRaceFini: "raceFiniLock", lockRankPollCache: "pollCache.lock", lockRankDebug: "debugLock", }
lockPartialOrder is a partial order among the various lock types, listing the immediate ordering that has actually been observed in the runtime. Each entry (which corresponds to a particular lock rank) specifies the list of locks that can already be held immediately "above" it.
So, for example, the lockRankSched entry shows that all the locks preceding it in rank can actually be held. The fin lock shows that only the sched, timers, or hchan lock can be held immediately above it when it is acquired.
var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankDummy: {},
lockRankSysmon: {},
lockRankScavenge: {lockRankSysmon},
lockRankForcegc: {lockRankSysmon},
lockRankSweepWaiters: {},
lockRankAssistQueue: {},
lockRankCpuprof: {},
lockRankSweep: {},
lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep},
lockRankDeadlock: {lockRankDeadlock},
lockRankPanic: {lockRankDeadlock},
lockRankAllg: {lockRankSysmon, lockRankSched, lockRankPanic},
lockRankAllp: {lockRankSysmon, lockRankSched},
lockRankPollDesc: {},
lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSched, lockRankAllp, lockRankPollDesc, lockRankTimers},
lockRankItab: {},
lockRankReflectOffs: {lockRankItab},
lockRankHchan: {lockRankScavenge, lockRankSweep, lockRankHchan},
lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankSched, lockRankAllg, lockRankTimers, lockRankHchan},
lockRankNotifyList: {},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
lockRankTraceStrings: {lockRankTraceBuf},
lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings},
lockRankProf: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankRoot: {},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankSched, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot, lockRankSweep},
lockRankTraceStackTab: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankTimers, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot, lockRankTrace},
lockRankNetpollInit: {lockRankTimers},
lockRankRwmutexW: {},
lockRankRwmutexR: {lockRankRwmutexW},
lockRankMcentral: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankSpine: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot, lockRankNotifyList, lockRankProf, lockRankGcBitsArenas, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankMcentral, lockRankSpine, lockRankSpanSetSpine},
lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankMcentral, lockRankSpine, lockRankSpanSetSpine, lockRankGscan},
lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankMcentral, lockRankSpanSetSpine, lockRankGscan},
lockRankDefer: {},
lockRankSudog: {lockRankNotifyList, lockRankHchan},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan, lockRankMspanSpecial, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankMcentral, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans, lockRankSpanSetSpine},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankGlobalAlloc: {lockRankProf, lockRankSpine, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
lockRankGFree: {lockRankSched},
lockRankHchanLeaf: {lockRankGscan, lockRankHchanLeaf},
lockRankNewmHandoff: {},
lockRankDebugPtrmask: {},
lockRankFaketimeState: {},
lockRankTicks: {},
lockRankRaceFini: {},
lockRankPollCache: {},
lockRankDebug: {},
}
mSpanStateNames are the names of the span states, indexed by mSpanState.
var mSpanStateNames = []string{ "mSpanDead", "mSpanInUse", "mSpanManual", "mSpanFree", }
mainStarted indicates that the main M has started.
var mainStarted bool
main_init_done is a signal used by cgocallbackg that initialization has been completed. It is made before _cgo_notify_runtime_init_done, so all cgo calls can rely on it existing. When main_init is complete, it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool
Maximum searchAddr value, which indicates that the heap has no free space.
We alias maxOffAddr just to make it clear that this is the maximum address for the page allocator's search space. See maxOffAddr for details.
var maxSearchAddr = maxOffAddr
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
set using cmd/go/internal/modload.ModInfoProg
var modinfo string
var modulesSlice *[]*moduledata // see activeModules
var mutexprofilerate uint64 // fraction sampled
var nbuf uintptr
newmHandoff contains a list of m structures that need new OS threads. This is used by newm in situations where newm itself can't safely start an OS thread.
var newmHandoff struct { lock mutex // newm points to a list of M structures that need new OS // threads. The list is linked through m.schedlink. newm muintptr // waiting indicates that wake needs to be notified when an m // is put on the list. waiting bool wake note // haveTemplateThread indicates that the templateThread has // been started. This is not protected by lock. Use cas to set // to 1. haveTemplateThread uint32 }
var no_pointers_stackmap uint64 // defined in assembly, for NO_LOCAL_POINTERS macro
ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}
var overflowError = error(errorString("integer overflow"))
var overflowTag [1]unsafe.Pointer // always nil
panicking is non-zero when crashing the program for an unrecovered panic. panicking is incremented and decremented atomically.
var panicking uint32
pendingPreemptSignals is the number of preemption signals that have been sent but not received. This is only used on Darwin. For #41702.
var pendingPreemptSignals uint32
physPageSize is the size in bytes of the OS's physical pages. Mapping and unmapping operations must be done at multiples of physPageSize.
This must be set by the OS init code (typically in osinit) before mallocinit.
var physPageSize uintptr
pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
These typemap objects are allocated at run time on the heap, but the only direct reference to them is in the moduledata, created by the linker and marked SNOPTRDATA so it is ignored by the GC.
To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type
var poolcleanup func()
var procAuxv = []byte("/proc/self/auxv\x00")
var prof struct { signalLock uint32 hz int32 }
var ptrnames = []string{ 0: "scalar", 1: "ptr", }
var racecgosync uint64 // represents possible synchronization in C code
reflectOffs holds type offsets defined at run time by the reflect package.
When a type is defined at run time, its *rtype data lives on the heap. The heap may use a wide range of addresses, not all of which are representable as a 32-bit offset. Moreover, the GC may one day start moving heap memory, in which case there is no stable offset that can be defined.
To provide stable offsets, we pin *rtype objects in a global map and treat the offset as an identifier. We use negative offsets that do not overlap with any compile-time module offsets.
Entries are created by reflect.addReflectOff.
var reflectOffs struct { lock mutex next int32 m map[int32]unsafe.Pointer minv map[unsafe.Pointer]int32 }
runningPanicDefers is non-zero while running deferred functions for panic. runningPanicDefers is incremented and decremented atomically. This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32
runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64
Sleep/wait state of the background scavenger.
var scavenge struct { lock mutex g *g parked bool timer *timer sysmonWake uint32 // Set atomically. }
var semtable [semTabSize]struct { root semaRoot pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte }
var shiftError = error(errorString("negative shift amount"))
sig handles communication between the signal handler and os/signal. Other than the inuse and recv fields, the fields are accessed atomically.
The wanted and ignored fields are only written by one goroutine at a time; access is controlled by the handlers Mutex in os/signal. The fields are only read by that one goroutine and by the signal handler. We access them atomically to minimize the race between setting them in the goroutine calling os/signal and the signal handler, which may be running in a different thread. That race is unavoidable, as there is no connection between handling a signal and receiving one, but atomic instructions should minimize it.
var sig struct { note note mask [(_NSIG + 31) / 32]uint32 wanted [(_NSIG + 31) / 32]uint32 ignored [(_NSIG + 31) / 32]uint32 recv [(_NSIG + 31) / 32]uint32 state uint32 delivering uint32 inuse bool }
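The exported counterpart of this machinery is the os/signal package; a minimal sketch of subscribing to a signal (the choice of SIGINT is arbitrary):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT) // signals wanted here flow through the sig struct above
	fmt.Println("waiting for SIGINT (Ctrl-C)...")
	<-c
	signal.Stop(c)
	fmt.Println("got it; shutting down")
}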
var signalsOK bool
var sigprofCallersUse uint32
var sigset_all = sigset{^uint32(0), ^uint32(0)}
var sigtable = [...]sigTabT{
{0, "SIGNONE: no trap"},
{_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
{_SigNotify + _SigKill, "SIGINT: interrupt"},
{_SigNotify + _SigThrow, "SIGQUIT: quit"},
{_SigThrow + _SigUnblock, "SIGILL: illegal instruction"},
{_SigThrow + _SigUnblock, "SIGTRAP: trace trap"},
{_SigNotify + _SigThrow, "SIGABRT: abort"},
{_SigPanic + _SigUnblock, "SIGBUS: bus error"},
{_SigPanic + _SigUnblock, "SIGFPE: floating-point exception"},
{0, "SIGKILL: kill"},
{_SigNotify, "SIGUSR1: user-defined signal 1"},
{_SigPanic + _SigUnblock, "SIGSEGV: segmentation violation"},
{_SigNotify, "SIGUSR2: user-defined signal 2"},
{_SigNotify, "SIGPIPE: write to broken pipe"},
{_SigNotify, "SIGALRM: alarm clock"},
{_SigNotify + _SigKill, "SIGTERM: termination"},
{_SigThrow + _SigUnblock, "SIGSTKFLT: stack fault"},
{_SigNotify + _SigUnblock + _SigIgn, "SIGCHLD: child status has changed"},
{_SigNotify + _SigDefault + _SigIgn, "SIGCONT: continue"},
{0, "SIGSTOP: stop, unblockable"},
{_SigNotify + _SigDefault + _SigIgn, "SIGTSTP: keyboard stop"},
{_SigNotify + _SigDefault + _SigIgn, "SIGTTIN: background read from tty"},
{_SigNotify + _SigDefault + _SigIgn, "SIGTTOU: background write to tty"},
{_SigNotify + _SigIgn, "SIGURG: urgent condition on socket"},
{_SigNotify, "SIGXCPU: cpu limit exceeded"},
{_SigNotify, "SIGXFSZ: file size limit exceeded"},
{_SigNotify, "SIGVTALRM: virtual alarm clock"},
{_SigNotify + _SigUnblock, "SIGPROF: profiling alarm clock"},
{_SigNotify + _SigIgn, "SIGWINCH: window size change"},
{_SigNotify, "SIGIO: i/o now possible"},
{_SigNotify, "SIGPWR: power failure restart"},
{_SigThrow, "SIGSYS: bad system call"},
{_SigSetStack + _SigUnblock, "signal 32"},
{_SigSetStack + _SigUnblock, "signal 33"},
{_SigNotify, "signal 34"},
{_SigNotify, "signal 35"},
{_SigNotify, "signal 36"},
{_SigNotify, "signal 37"},
{_SigNotify, "signal 38"},
{_SigNotify, "signal 39"},
{_SigNotify, "signal 40"},
{_SigNotify, "signal 41"},
{_SigNotify, "signal 42"},
{_SigNotify, "signal 43"},
{_SigNotify, "signal 44"},
{_SigNotify, "signal 45"},
{_SigNotify, "signal 46"},
{_SigNotify, "signal 47"},
{_SigNotify, "signal 48"},
{_SigNotify, "signal 49"},
{_SigNotify, "signal 50"},
{_SigNotify, "signal 51"},
{_SigNotify, "signal 52"},
{_SigNotify, "signal 53"},
{_SigNotify, "signal 54"},
{_SigNotify, "signal 55"},
{_SigNotify, "signal 56"},
{_SigNotify, "signal 57"},
{_SigNotify, "signal 58"},
{_SigNotify, "signal 59"},
{_SigNotify, "signal 60"},
{_SigNotify, "signal 61"},
{_SigNotify, "signal 62"},
{_SigNotify, "signal 63"},
{_SigNotify, "signal 64"},
}
var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv + 1]uint8{31, 32, 33, 34, 35, 36, 36, 37, 37, 38, 38, 39, 39, 39, 40, 40, 40, 41, 42, 42, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 47, 47, 47, 48, 48, 49, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 53, 53, 53, 53, 54, 54, 54, 54, 54, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66}
var size_to_class8 = [smallSizeMax/smallSizeDiv + 1]uint8{0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31}
Size of the trailing by_size array differs between mstats and MemStats, and all data after by_size is local to runtime, not exported. NumSizeClasses was changed, but we cannot change MemStats because of backward compatibility. sizeof_C_MStats is the size of the prefix of mstats that corresponds to MemStats. It should match Sizeof(MemStats{}).
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])
var skipPC uintptr
Global pool of large stack spans.
var stackLarge struct { lock mutex free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages) }
Global pool of spans that have free stacks. Stacks are assigned an order according to size.
order = log_2(size/FixedStack)
There is a free list for each order.
var stackpool [_NumStackOrders]struct { item stackpoolItem _ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte }
var starttime int64
startupRandomData holds random bytes initialized at startup. These come from the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
staticuint64s is used to avoid allocating in convTx for small integer values.
var staticuint64s = [...]uint64{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, }
var sysTHPSizePath = []byte("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size\x00")
testSigtrap and testSigusr1 are used by the runtime tests. If non-nil, they are called on SIGTRAP and SIGUSR1, respectively. If the hook returns true, the normal behavior on that signal is suppressed.
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
var testSigusr1 func(gp *g) bool
TODO: These should be locals in testAtomic64, but we don't 8-byte align stack variables on 386.
var test_z64, test_x64 uint64
throwOnGCWork causes any operations that add pointers to a gcWork buffer to throw.
TODO(austin): This is a temporary debugging measure for issue #27993. To be removed before release.
var throwOnGCWork bool
throwReportQuirk, if non-nil, is called by throw after dumping the stacks.
TODO(austin): Remove this after Go 1.15 when we remove the mlockGsignal workaround.
var throwReportQuirk func()
var ticks struct { lock mutex pad uint32 // ensure 8-byte alignment of val on 386 val uint64 }
var tmpbuf []byte
touchStackBeforeSignal stores an errno value. If non-zero, it means that we should touch the signal stack before sending a signal. This is used on systems with a bug that requires the signal stack to be faulted in before sending a signal. See #35777 and #37436.
This is accessed atomically as it is set and read in different threads.
TODO(austin): Remove this after Go 1.15 when we remove the mlockGsignal workaround.
var touchStackBeforeSignal uint32
trace is global tracing context.
var trace struct { lock mutex // protects the following members lockOwner *g // to avoid deadlocks during recursive lock locks enabled bool // when set runtime traces events shutdown bool // set when we are waiting for trace reader to finish after setting enabled to false headerWritten bool // whether ReadTrace has emitted trace header footerWritten bool // whether ReadTrace has emitted trace footer shutdownSema uint32 // used to wait for ReadTrace completion seqStart uint64 // sequence number when tracing was started ticksStart int64 // cputicks when tracing was started ticksEnd int64 // cputicks when tracing was stopped timeStart int64 // nanotime when tracing was started timeEnd int64 // nanotime when tracing was stopped seqGC uint64 // GC start/done sequencer reading traceBufPtr // buffer currently handed off to user empty traceBufPtr // stack of empty buffers fullHead traceBufPtr // queue of full buffers fullTail traceBufPtr reader guintptr // goroutine that called ReadTrace, or nil stackTab traceStackTable // maps stack traces to unique ids // Dictionary for traceEvString. // // TODO: central lock to access the map is not ideal. // option: pre-assign ids to all user annotation region names and tags // option: per-P cache // option: sync.Map like data structure stringsLock mutex strings map[string]uint64 stringSeq uint64 // markWorkerLabels maps gcMarkWorkerMode to string ID. markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64 bufLock mutex // protects buf buf traceBufPtr // global trace buffer, used when running without a p }
var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
var typecache [typeCacheBuckets]typeCacheBucket
var urandom_dev = []byte("/dev/urandom\x00")
var useAVXmemmove bool
runtime variable to check if the processor we're running on actually supports the instructions used by the AES-based hash implementation.
var useAeshash bool
If useCheckmark is true, marking of an object uses the checkmark bits (encoding above) instead of the standard mark bits.
var useCheckmark = false
var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6}
var vdsoSymbolKeys = []vdsoSymbolKey{
{"__vdso_gettimeofday", 0x315ca59, 0xb01bca00, &vdsoGettimeofdaySym},
{"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym},
}
var waitReasonStrings = [...]string{ waitReasonZero: "", waitReasonGCAssistMarking: "GC assist marking", waitReasonIOWait: "IO wait", waitReasonChanReceiveNilChan: "chan receive (nil chan)", waitReasonChanSendNilChan: "chan send (nil chan)", waitReasonDumpingHeap: "dumping heap", waitReasonGarbageCollection: "garbage collection", waitReasonGarbageCollectionScan: "garbage collection scan", waitReasonPanicWait: "panicwait", waitReasonSelect: "select", waitReasonSelectNoCases: "select (no cases)", waitReasonGCAssistWait: "GC assist wait", waitReasonGCSweepWait: "GC sweep wait", waitReasonGCScavengeWait: "GC scavenge wait", waitReasonChanReceive: "chan receive", waitReasonChanSend: "chan send", waitReasonFinalizerWait: "finalizer wait", waitReasonForceGCIdle: "force gc (idle)", waitReasonSemacquire: "semacquire", waitReasonSleep: "sleep", waitReasonSyncCondWait: "sync.Cond.Wait", waitReasonTimerGoroutineIdle: "timer goroutine (idle)", waitReasonTraceReaderBlocked: "trace reader (blocked)", waitReasonWaitForGCCycle: "wait for GC cycle", waitReasonGCWorkerIdle: "GC worker (idle)", waitReasonPreempted: "preempted", waitReasonDebugCall: "debug call", }
var work struct { full lfstack // lock-free list of full blocks workbuf empty lfstack // lock-free list of empty blocks workbuf pad0 cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait wbufSpans struct { lock mutex // free is a list of spans dedicated to workbufs, but // that don't currently contain any workbufs. free mSpanList // busy is a list of all spans containing workbufs on // one of the workbuf lists. busy mSpanList } // Restore 64-bit alignment on 32-bit. _ uint32 // bytesMarked is the number of bytes marked this cycle. This // includes bytes blackened in scanned objects, noscan objects // that go straight to black, and permagrey objects scanned by // markroot during the concurrent scan phase. This is updated // atomically during the cycle. Updates may be batched // arbitrarily, since the value is only read at the end of the // cycle. // // Because of benign races during marking, this number may not // be the exact number of marked bytes, but it should be very // close. // // Put this field here because it needs 64-bit atomic access // (and thus 8-byte alignment even on 32-bit architectures). bytesMarked uint64 markrootNext uint32 // next markroot job markrootJobs uint32 // number of markroot jobs nproc uint32 tstart int64 nwait uint32 ndone uint32 // Number of roots of various root types. Set by gcMarkRootPrepare. nFlushCacheRoots int nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int // Each type of GC state transition is protected by a lock. // Since multiple threads can simultaneously detect the state // transition condition, any thread that detects a transition // condition must acquire the appropriate transition lock, // re-check the transition condition and return if it no // longer holds or perform the transition if it does. // Likewise, any transition must invalidate the transition // condition before releasing the lock. This ensures that each // transition is performed by exactly one thread and threads // that need the transition to happen block until it has // happened. // // startSema protects the transition from "off" to mark or // mark termination. startSema uint32 // markDoneSema protects transitions from mark to mark termination. markDoneSema uint32 bgMarkReady note // signal background mark worker has started bgMarkDone uint32 // cas to 1 when at a background mark completion point // mode is the concurrency mode of the current GC cycle. mode gcMode // userForced indicates the current GC cycle was forced by an // explicit user call. userForced bool // totaltime is the CPU nanoseconds spent in GC since the // program started if debug.gctrace > 0. totaltime int64 // initialHeapLive is the value of memstats.heap_live at the // beginning of this GC cycle. initialHeapLive uint64 // assistQueue is a queue of assists that are blocked because // there was neither enough credit to steal or enough work to // do. assistQueue struct { lock mutex q gQueue } // sweepWaiters is a list of blocked goroutines to wake when // we transition from mark termination to sweep. sweepWaiters struct { lock mutex list gList } // cycles is the number of completed GC cycles, where a GC // cycle is sweep termination, mark, mark termination, and // sweep. This differs from memstats.numgc, which is // incremented at mark termination. cycles uint32 // Timing/utilization stats for this cycle. 
stwprocs, maxprocs int32 tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start pauseNS int64 // total STW time this cycle pauseStart int64 // nanotime() of last STW // debug.gctrace heap sizes for this cycle. heap0, heap1, heap2, heapGoal uint64 }
Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1
The compiler knows about this variable. If you change it, you must change builtin/runtime.go, too. If you change the first four bytes, you must also change the write barrier insertion code.
var writeBarrier struct { enabled bool // compiler emits a check of this before calling write barrier pad [3]byte // compiler uses 32-bit load for "enabled" field needed bool // whether we need a write barrier for current GC phase cgo bool // whether we need a write barrier for a cgo check alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load }
var zeroVal [maxZero]byte
base address for all 0-byte allocations
var zerobase uintptr
func BlockProfile ¶ 1.1
func BlockProfile(p []BlockProfileRecord) (n int, ok bool)
BlockProfile returns n, the number of records in the current blocking profile. If len(p) >= n, BlockProfile copies the profile into p and returns n, true. If len(p) < n, BlockProfile does not change p and returns n, false.
Most clients should use the runtime/pprof package or the testing package's -test.blockprofile flag instead of calling BlockProfile directly.
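A sketch of that recommended path, combining SetBlockProfileRate with the "block" profile from runtime/pprof; the output path is a placeholder:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	runtime.SetBlockProfileRate(1) // sample every blocking event

	// ... code whose blocking behavior is of interest ...

	f, err := os.Create("block.pprof") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.Lookup("block").WriteTo(f, 0); err != nil {
		panic(err)
	}
}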
func Breakpoint ¶
func Breakpoint()
Breakpoint executes a breakpoint trap.
func CPUProfile ¶
func CPUProfile() []byte
CPUProfile panics. It formerly provided raw access to chunks of a pprof-format profile generated by the runtime. The details of generating that format have changed, so this functionality has been removed.
Deprecated: Use the runtime/pprof package, or the handlers in the net/http/pprof package, or the testing package's -test.cpuprofile flag instead.
func Caller ¶
func Caller(skip int) (pc uintptr, file string, line int, ok bool)
Caller reports file and line number information about function invocations on the calling goroutine's stack. The argument skip is the number of stack frames to ascend, with 0 identifying the caller of Caller. (For historical reasons the meaning of skip differs between Caller and Callers.) The return values report the program counter, file name, and line number within the file of the corresponding call. The boolean ok is false if it was not possible to recover the information.
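A small example of the skip argument; skip=1 inside the helper reports the helper's caller:

package main

import (
	"fmt"
	"runtime"
)

func where() string {
	// skip=1 skips the where frame itself.
	_, file, line, ok := runtime.Caller(1)
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func main() {
	fmt.Println("called from", where())
}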
func Callers ¶
func Callers(skip int, pc []uintptr) int
Callers fills the slice pc with the return program counters of function invocations on the calling goroutine's stack. The argument skip is the number of stack frames to skip before recording in pc, with 0 identifying the frame for Callers itself and 1 identifying the caller of Callers. It returns the number of entries written to pc.
To translate these PCs into symbolic information such as function names and line numbers, use CallersFrames. CallersFrames accounts for inlined functions and adjusts the return program counters into call program counters. Iterating over the returned slice of PCs directly is discouraged, as is using FuncForPC on any of the returned PCs, since these cannot account for inlining or return program counter adjustment.
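A sketch of the recommended pattern, feeding the PCs to CallersFrames rather than inspecting them directly:

package main

import (
	"fmt"
	"runtime"
)

func printStack() {
	pc := make([]uintptr, 32)
	// skip=2 skips runtime.Callers and printStack itself.
	n := runtime.Callers(2, pc)
	frames := runtime.CallersFrames(pc[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}

func main() {
	printStack()
}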
func GC ¶
func GC()
GC runs a garbage collection and blocks the caller until the garbage collection is complete. It may also block the entire program.
func GOMAXPROCS ¶
func GOMAXPROCS(n int) int
GOMAXPROCS sets the maximum number of CPUs that can be executing simultaneously and returns the previous setting. If n < 1, it does not change the current setting. The number of logical CPUs on the local machine can be queried with NumCPU. This call will go away when the scheduler improves.
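For example, passing a value below 1 reads the current setting without changing it:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	cur := runtime.GOMAXPROCS(0) // n < 1: report the setting, leave it unchanged
	fmt.Printf("GOMAXPROCS=%d NumCPU=%d\n", cur, runtime.NumCPU())
}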
func GOROOT ¶
func GOROOT() string
GOROOT returns the root of the Go tree. It uses the GOROOT environment variable, if set at process start, or else the root used during the Go build.
func Goexit ¶
func Goexit()
Goexit terminates the goroutine that calls it. No other goroutine is affected. Goexit runs all deferred calls before terminating the goroutine. Because Goexit is not a panic, any recover calls in those deferred functions will return nil.
Calling Goexit from the main goroutine terminates that goroutine without func main returning. Since func main has not returned, the program continues execution of other goroutines. If all other goroutines exit, the program crashes.
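A short example showing that deferred calls still run and that recover reports nil, since Goexit is not a panic:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer func() {
			fmt.Println("recover:", recover()) // prints "recover: <nil>"
		}()
		runtime.Goexit()
		fmt.Println("never reached")
	}()
	<-done
}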
func GoroutineProfile ¶
func GoroutineProfile(p []StackRecord) (n int, ok bool)
GoroutineProfile returns n, the number of records in the active goroutine stack profile. If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true. If len(p) < n, GoroutineProfile does not change p and returns n, false.
Most clients should use the runtime/pprof package instead of calling GoroutineProfile directly.
func Gosched ¶
func Gosched()
Gosched yields the processor, allowing other goroutines to run. It does not suspend the current goroutine, so execution resumes automatically.
func KeepAlive ¶ 1.7
func KeepAlive(x interface{})
KeepAlive marks its argument as currently reachable. This ensures that the object is not freed, and its finalizer is not run, before the point in the program where KeepAlive is called.
A very simplified example showing where KeepAlive is required:
type File struct { d int }
d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
// ... do something if err != nil ...
p := &File{d}
runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
var buf [10]byte
n, err := syscall.Read(p.d, buf[:])
// Ensure p is not finalized until Read returns.
runtime.KeepAlive(p)
// No more uses of p after this point.
Without the KeepAlive call, the finalizer could run at the start of syscall.Read, closing the file descriptor before syscall.Read makes the actual system call.
func LockOSThread ¶
func LockOSThread()
LockOSThread wires the calling goroutine to its current operating system thread. The calling goroutine will always execute in that thread, and no other goroutine will execute in it, until the calling goroutine has made as many calls to UnlockOSThread as to LockOSThread. If the calling goroutine exits without unlocking the thread, the thread will be terminated.
All init functions are run on the startup thread. Calling LockOSThread from an init function will cause the main function to be invoked on that thread.
A goroutine should call LockOSThread before calling OS services or non-Go library functions that depend on per-thread state.
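A minimal sketch of wiring a goroutine to one OS thread around thread-sensitive work; doThreadSensitiveWork is a placeholder for, say, a C library call that relies on thread-local state:

package main

import "runtime"

// doThreadSensitiveWork stands in for a call that depends on per-thread state.
func doThreadSensitiveWork() {}

func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	doThreadSensitiveWork()
}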
func MemProfile ¶
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
MemProfile returns a profile of memory allocated and freed per allocation site.
MemProfile returns n, the number of records in the current memory profile. If len(p) >= n, MemProfile copies the profile into p and returns n, true. If len(p) < n, MemProfile does not change p and returns n, false.
If inuseZero is true, the profile includes allocation records where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes. These are sites where memory was allocated, but it has all been released back to the runtime.
The returned profile may be up to two garbage collection cycles old. This is to avoid skewing the profile toward allocations; because allocations happen in real time but frees are delayed until the garbage collector performs sweeping, the profile only accounts for allocations that have had a chance to be freed by the garbage collector.
Most clients should use the runtime/pprof package or the testing package's -test.memprofile flag instead of calling MemProfile directly.
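A sketch of that usual route, writing the "heap" profile maintained by runtime/pprof; the GC call brings the profile up to date and the output path is a placeholder:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// ... allocate ...

	runtime.GC() // flush recent allocations into the profile

	f, err := os.Create("heap.pprof") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.Lookup("heap").WriteTo(f, 0); err != nil {
		panic(err)
	}
}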
func MutexProfile ¶ 1.8
func MutexProfile(p []BlockProfileRecord) (n int, ok bool)
MutexProfile returns n, the number of records in the current mutex profile. If len(p) >= n, MutexProfile copies the profile into p and returns n, true. Otherwise, MutexProfile does not change p, and returns n, false.
Most clients should use the runtime/pprof package instead of calling MutexProfile directly.
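A sketch of enabling mutex profiling and dumping it through runtime/pprof; the sampling fraction and output path are arbitrary:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	runtime.SetMutexProfileFraction(5) // report roughly 1 in 5 contention events

	// ... contended code ...

	f, err := os.Create("mutex.pprof") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.Lookup("mutex").WriteTo(f, 0); err != nil {
		panic(err)
	}
}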
func NumCPU ¶
func NumCPU() int
NumCPU returns the number of logical CPUs usable by the current process.
The set of available CPUs is checked by querying the operating system at process startup. Changes to operating system CPU allocation after process startup are not reflected.
func NumCgoCall ¶
func NumCgoCall() int64
NumCgoCall returns the number of cgo calls made by the current process.
func NumGoroutine ¶
func NumGoroutine() int
NumGoroutine returns the number of goroutines that currently exist.
func ReadMemStats ¶
func ReadMemStats(m *MemStats)
ReadMemStats populates m with memory allocator statistics.
The returned memory allocator statistics are up to date as of the call to ReadMemStats. This is in contrast with a heap profile, which is a snapshot as of the most recently completed garbage collection cycle.
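For example:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("HeapAlloc=%d bytes, NumGC=%d\n", m.HeapAlloc, m.NumGC)
}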
func ReadTrace ¶ 1.5
func ReadTrace() []byte
ReadTrace returns the next chunk of binary tracing data, blocking until data is available. If tracing is turned off and all the data accumulated while it was on has been returned, ReadTrace returns nil. The caller must copy the returned data before calling ReadTrace again. ReadTrace must be called from one goroutine at a time.
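A sketch of the single-reader loop this implies, paired with StartTrace and StopTrace; most programs should prefer runtime/trace.Start, which wraps the same pattern, and the output path here is a placeholder:

package main

import (
	"os"
	"runtime"
)

func main() {
	f, err := os.Create("trace.out") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := runtime.StartTrace(); err != nil {
		panic(err)
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			data := runtime.ReadTrace() // only one reader at a time
			if data == nil {
				return // tracing stopped and all data consumed
			}
			f.Write(data)
		}
	}()

	// ... traced work ...

	runtime.StopTrace()
	<-done
}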
func SetBlockProfileRate ¶ 1.1
func SetBlockProfileRate(rate int)
SetBlockProfileRate controls the fraction of goroutine blocking events that are reported in the blocking profile. The profiler aims to sample an average of one blocking event per rate nanoseconds spent blocked.
To include every blocking event in the profile, pass rate = 1. To turn off profiling entirely, pass rate <= 0.
func SetCPUProfileRate ¶
func SetCPUProfileRate(hz int)
SetCPUProfileRate sets the CPU profiling rate to hz samples per second. If hz <= 0, SetCPUProfileRate turns off profiling. If the profiler is on, the rate cannot be changed without first turning it off.
Most clients should use the runtime/pprof package or the testing package's -test.cpuprofile flag instead of calling SetCPUProfileRate directly.
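The equivalent through runtime/pprof, which manages the rate and output format; the output path is a placeholder:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.pprof") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil {
		panic(err)
	}
	defer pprof.StopCPUProfile()

	// ... CPU-bound work to profile ...
}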
func SetCgoTraceback ¶ 1.7
func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer)
SetCgoTraceback records three C functions to use to gather traceback information from C code and to convert that traceback information into symbolic information. These are used when printing stack traces for a program that uses cgo.
The traceback and context functions may be called from a signal handler, and must therefore use only async-signal safe functions. The symbolizer function may be called while the program is crashing, and so must be cautious about using memory. None of the functions may call back into Go.
The context function will be called with a single argument, a pointer to a struct:
struct {
	Context uintptr
}
In C syntax, this struct will be
struct {
	uintptr_t Context;
};
If the Context field is 0, the context function is being called to record the current traceback context. It should record in the Context field whatever information is needed about the current point of execution to later produce a stack trace, probably the stack pointer and PC. In this case the context function will be called from C code.
If the Context field is not 0, then it is a value returned by a previous call to the context function. This case is called when the context is no longer needed; that is, when the Go code is returning to its C code caller. This permits the context function to release any associated resources.
While it would be correct for the context function to record a complete stack trace whenever it is called, and simply copy that out in the traceback function, in a typical program the context function will be called many times without ever recording a traceback for that context. Recording a complete stack trace in a call to the context function is likely to be inefficient.
The traceback function will be called with a single argument, a pointer to a struct:
struct {
	Context    uintptr
	SigContext uintptr
	Buf        *uintptr
	Max        uintptr
}
In C syntax, this struct will be
struct {
	uintptr_t  Context;
	uintptr_t  SigContext;
	uintptr_t* Buf;
	uintptr_t  Max;
};
The Context field will be zero to gather a traceback from the current program execution point. In this case, the traceback function will be called from C code.
Otherwise Context will be a value previously returned by a call to the context function. The traceback function should gather a stack trace from that saved point in the program execution. The traceback function may be called from an execution thread other than the one that recorded the context, but only when the context is known to be valid and unchanging. The traceback function may also be called deeper in the call stack on the same thread that recorded the context. The traceback function may be called multiple times with the same Context value; it will usually be appropriate to cache the result, if possible, the first time this is called for a specific context value.
If the traceback function is called from a signal handler on a Unix system, SigContext will be the signal context argument passed to the signal handler (a C ucontext_t* cast to uintptr_t). This may be used to start tracing at the point where the signal occurred. If the traceback function is not called from a signal handler, SigContext will be zero.
Buf is where the traceback information should be stored. It should be PC values, such that Buf[0] is the PC of the caller, Buf[1] is the PC of that function's caller, and so on. Max is the maximum number of entries to store. The function should store a zero to indicate the top of the stack, or that the caller is on a different stack, presumably a Go stack.
Unlike runtime.Callers, the PC values returned should, when passed to the symbolizer function, return the file/line of the call instruction. No additional subtraction is required or appropriate.
On all platforms, the traceback function is invoked when a call from Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le, and freebsd/amd64, the traceback function is also invoked when a signal is received by a thread that is executing a cgo call. The traceback function should not make assumptions about when it is called, as future versions of Go may make additional calls.
The symbolizer function will be called with a single argument, a pointer to a struct:
struct {
	PC     uintptr // program counter to fetch information for
	File   *byte   // file name (NUL terminated)
	Lineno uintptr // line number
	Func   *byte   // function name (NUL terminated)
	Entry  uintptr // function entry point
	More   uintptr // set non-zero if more info for this PC
	Data   uintptr // unused by runtime, available for function
}
In C syntax, this struct will be
struct {
	uintptr_t PC;
	char*     File;
	uintptr_t Lineno;
	char*     Func;
	uintptr_t Entry;
	uintptr_t More;
	uintptr_t Data;
};
The PC field will be a value returned by a call to the traceback function.
The first time the function is called for a particular traceback, all the fields except PC will be 0. The function should fill in the other fields if possible, setting them to 0/nil if the information is not available. The Data field may be used to store any useful information across calls. The More field should be set to non-zero if there is more information for this PC, zero otherwise. If More is set non-zero, the function will be called again with the same PC, and may return different information (this is intended for use with inlined functions). If More is zero, the function will be called with the next PC value in the traceback. When the traceback is complete, the function will be called once more with PC set to zero; this may be used to free any information. Each call will leave the fields of the struct set to the same values they had upon return, except for the PC field when the More field is zero. The function must not keep a copy of the struct pointer between calls.
When calling SetCgoTraceback, the version argument is the version number of the structs that the functions expect to receive. Currently this must be zero.
The symbolizer function may be nil, in which case the results of the traceback function will be displayed as numbers. If the traceback function is nil, the symbolizer function will never be called. The context function may be nil, in which case the traceback function will only be called with the context field set to zero. If the context function is nil, then calls from Go to C to Go will not show a traceback for the C portion of the call stack.
SetCgoTraceback should be called only once, ideally from an init function.
func SetFinalizer ¶
func SetFinalizer(obj interface{}, finalizer interface{})
SetFinalizer sets the finalizer associated with obj to the provided finalizer function. When the garbage collector finds an unreachable block with an associated finalizer, it clears the association and runs finalizer(obj) in a separate goroutine. This makes obj reachable again, but now without an associated finalizer. Assuming that SetFinalizer is not called again, the next time the garbage collector sees that obj is unreachable, it will free obj.
SetFinalizer(obj, nil) clears any finalizer associated with obj.
The argument obj must be a pointer to an object allocated by calling new, by taking the address of a composite literal, or by taking the address of a local variable. The argument finalizer must be a function that takes a single argument to which obj's type can be assigned, and can have arbitrary ignored return values. If either of these is not true, SetFinalizer may abort the program.
Finalizers are run in dependency order: if A points at B, both have finalizers, and they are otherwise unreachable, only the finalizer for A runs; once A is freed, the finalizer for B can run. If a cyclic structure includes a block with a finalizer, that cycle is not guaranteed to be garbage collected and the finalizer is not guaranteed to run, because there is no ordering that respects the dependencies.
The finalizer is scheduled to run at some arbitrary time after the program can no longer reach the object to which obj points. There is no guarantee that finalizers will run before a program exits, so typically they are useful only for releasing non-memory resources associated with an object during a long-running program. For example, an os.File object could use a finalizer to close the associated operating system file descriptor when a program discards an os.File without calling Close, but it would be a mistake to depend on a finalizer to flush an in-memory I/O buffer such as a bufio.Writer, because the buffer would not be flushed at program exit.
It is not guaranteed that a finalizer will run if the size of *obj is zero bytes.
It is not guaranteed that a finalizer will run for objects allocated in initializers for package-level variables. Such objects may be linker-allocated, not heap-allocated.
A finalizer may run as soon as an object becomes unreachable. In order to use finalizers correctly, the program must ensure that the object is reachable until it is no longer required. Objects stored in global variables, or that can be found by tracing pointers from a global variable, are reachable. For other objects, pass the object to a call of the KeepAlive function to mark the last point in the function where the object must be reachable.
For example, if p points to a struct that contains a file descriptor d, and p has a finalizer that closes that file descriptor, and if the last use of p in a function is a call to syscall.Write(p.d, buf, size), then p may be unreachable as soon as the program enters syscall.Write. The finalizer may run at that moment, closing p.d, causing syscall.Write to fail because it is writing to a closed file descriptor (or, worse, to an entirely different file descriptor opened by a different goroutine). To avoid this problem, call runtime.KeepAlive(p) after the call to syscall.Write.
A single goroutine runs all finalizers for a program, sequentially. If a finalizer must run for a long time, it should do so by starting a new goroutine.
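A minimal sketch of the syscall.Write/KeepAlive scenario described above, assuming a Unix-like system; the wrapper type and open helper are hypothetical, not part of the runtime API.

	package main

	import (
		"runtime"
		"syscall"
	)

	// file is a hypothetical wrapper around a raw file descriptor.
	type file struct{ d int }

	func open(path string) (*file, error) {
		fd, err := syscall.Open(path, syscall.O_RDONLY, 0)
		if err != nil {
			return nil, err
		}
		f := &file{d: fd}
		// Close the descriptor if the wrapper is garbage collected without Close.
		runtime.SetFinalizer(f, func(f *file) { syscall.Close(f.d) })
		return f, nil
	}

	func main() {
		f, err := open("/etc/hostname")
		if err != nil {
			return
		}
		buf := make([]byte, 64)
		syscall.Read(f.d, buf)
		// Keep f reachable until after the last use of f.d, so the finalizer
		// cannot close the descriptor while Read is still using it.
		runtime.KeepAlive(f)
	}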
func SetMutexProfileFraction ¶ 1.8
func SetMutexProfileFraction(rate int) int
SetMutexProfileFraction controls the fraction of mutex contention events that are reported in the mutex profile. On average 1/rate events are reported. The previous rate is returned.
To turn off profiling entirely, pass rate 0. To just read the current rate, pass rate < 0. (For n>1 the details of sampling may change.)
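For illustration, a small sketch of reading and then setting the rate:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		cur := runtime.SetMutexProfileFraction(-1) // rate < 0: read without changing
		fmt.Println("current mutex profile rate:", cur)

		prev := runtime.SetMutexProfileFraction(5) // report roughly 1 in 5 contention events
		defer runtime.SetMutexProfileFraction(prev)

		// ... run contended code, then collect the "mutex" profile via runtime/pprof ...
	}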
func Stack ¶
func Stack(buf []byte, all bool) int
Stack formats a stack trace of the calling goroutine into buf and returns the number of bytes written to buf. If all is true, Stack formats stack traces of all other goroutines into buf after the trace for the current goroutine.
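Because Stack truncates rather than failing when buf is too small, callers typically grow the buffer until the trace fits, as in this sketch (the helper name is illustrative):

	package main

	import (
		"fmt"
		"runtime"
	)

	// allStacks grows the buffer until the formatted trace of all goroutines fits.
	func allStacks() []byte {
		buf := make([]byte, 1<<16)
		for {
			n := runtime.Stack(buf, true)
			if n < len(buf) {
				return buf[:n]
			}
			buf = make([]byte, 2*len(buf))
		}
	}

	func main() {
		fmt.Printf("%s", allStacks())
	}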
func StartTrace ¶ 1.5
func StartTrace() error
StartTrace enables tracing for the current process. While tracing, the data will be buffered and available via ReadTrace. StartTrace returns an error if tracing is already enabled. Most clients should use the runtime/trace package or the testing package's -test.trace flag instead of calling StartTrace directly.
func StopTrace ¶ 1.5
func StopTrace()
StopTrace stops tracing, if it was previously enabled. StopTrace only returns after all the reads for the trace have completed.
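Most programs should use the runtime/trace package, but the raw protocol described above looks roughly like the sketch below: a single goroutine drains ReadTrace until it returns nil while the main goroutine starts and stops tracing. The output file name is arbitrary.

	package main

	import (
		"log"
		"os"
		"runtime"
	)

	func main() {
		out, err := os.Create("trace.out")
		if err != nil {
			log.Fatal(err)
		}
		defer out.Close()

		if err := runtime.StartTrace(); err != nil {
			log.Fatal(err)
		}

		done := make(chan struct{})
		go func() {
			for {
				data := runtime.ReadTrace()
				if data == nil {
					break
				}
				out.Write(data) // data is consumed before the next ReadTrace call
			}
			close(done)
		}()

		// ... run the workload being traced ...

		runtime.StopTrace()
		<-done
	}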
func ThreadCreateProfile ¶
func ThreadCreateProfile(p []StackRecord) (n int, ok bool)
ThreadCreateProfile returns n, the number of records in the thread creation profile. If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true. If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
Most clients should use the runtime/pprof package instead of calling ThreadCreateProfile directly.
func UnlockOSThread ¶
func UnlockOSThread()
UnlockOSThread undoes an earlier call to LockOSThread. If this drops the number of active LockOSThread calls on the calling goroutine to zero, it unwires the calling goroutine from its fixed operating system thread. If there are no active LockOSThread calls, this is a no-op.
Before calling UnlockOSThread, the caller must ensure that the OS thread is suitable for running other goroutines. If the caller made any permanent changes to the state of the thread that would affect other goroutines, it should not call this function and thus leave the goroutine locked to the OS thread until the goroutine (and hence the thread) exits.
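A common pattern is to pair the two calls with defer so the lock count stays balanced, as in this sketch (the helper name is hypothetical):

	package main

	import "runtime"

	// withLockedThread runs fn with the calling goroutine wired to its OS thread,
	// e.g. for code that relies on thread-local state. The calls nest: the thread
	// is unwired only when UnlockOSThread has been called as many times as
	// LockOSThread.
	func withLockedThread(fn func()) {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		fn()
	}

	func main() {
		withLockedThread(func() {
			// ... thread-sensitive work ...
		})
	}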
func Version ¶
func Version() string
Version returns the Go tree's version string. It is either the commit hash and date at the time of the build or, when possible, a release tag like "go1.3".
func _ELF_ST_BIND ¶
func _ELF_ST_BIND(val byte) byte
How to extract and insert information held in the st_info field.
func _ELF_ST_TYPE ¶
func _ELF_ST_TYPE(val byte) byte
func _ExternalCode ¶
func _ExternalCode()
func _GC ¶
func _GC()
func _LostExternalCode ¶
func _LostExternalCode()
func _LostSIGPROFDuringAtomic64 ¶
func _LostSIGPROFDuringAtomic64()
func _System ¶
func _System()
func _VDSO ¶
func _VDSO()
func _cgo_panic_internal ¶
func _cgo_panic_internal(p *byte)
func abort ¶
func abort()
abort crashes the runtime in situations where even throw might not work. In general it should do something a debugger will recognize (e.g., an INT3 on x86). A crash in abort is recognized by the signal handler, which will attempt to tear down the runtime immediately.
func abs ¶
func abs(x float64) float64
Abs returns the absolute value of x.
Special cases are:
Abs(±Inf) = +Inf
Abs(NaN) = NaN
func access ¶
func access(name *byte, mode int32) int32
Called from write_err_android.go only, but defined in sys_linux_*.s; declared here (instead of in write_err_android.go) for go vet on non-android builds. The return value is the raw syscall result, which may encode an error number.
func acquireLockRank ¶
func acquireLockRank(rank lockRank)
func acquirep ¶
func acquirep(_p_ *p)
Associate p and the current m.
This function is allowed to have write barriers even if the caller isn't because it immediately acquires _p_.
func add ¶
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer
Should be a built-in for unsafe.Pointer?
func add1 ¶
func add1(p *byte) *byte
add1 returns the byte pointer p+1.
func addAdjustedTimers ¶
func addAdjustedTimers(pp *p, moved []*timer)
addAdjustedTimers adds any timers we adjusted in adjusttimers back to the timer heap.
func addOneOpenDeferFrame ¶
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer)
addOneOpenDeferFrame scans the stack for the first frame (if any) with open-coded defers and if it finds one, adds a single record to the defer chain for that frame. If sp is non-nil, it starts the stack scan from the frame specified by sp. If sp is nil, it uses the sp from the current defer record (which has just been finished). Hence, it continues the stack scan from the frame of the defer that just finished. It skips any frame that already has an open-coded _defer record, which would have been created from a previous (unrecovered) panic.
Note: All entries of the defer chain (including this new open-coded entry) have their pointers (including sp) adjusted properly if the stack moves while running deferred functions. Also, it is safe to pass in the sp arg (which is the direct result of calling getcallersp()), because all pointer variables (including arguments) are adjusted as needed during stack copies.
func addb ¶
func addb(p *byte, n uintptr) *byte
addb returns the byte pointer p+n.
func addfinalizer ¶
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool
Adds a finalizer to the object p. Returns true if it succeeded.
func addmoduledata ¶
func addmoduledata()
Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
func addrsToSummaryRange ¶
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int)
addrsToSummaryRange converts base and limit pointers into a range of entries for the given summary level.
The returned range is inclusive on the lower bound and exclusive on the upper bound.
func addspecial ¶
func addspecial(p unsafe.Pointer, s *special) bool
Adds the special record s to the list of special records for the object p. All fields of s should be filled in except for offset & next, which this routine will fill in. Returns true if the special was successfully added, false otherwise. (The add will fail only if a record with the same p and s->kind already exists.)
func addtimer ¶
func addtimer(t *timer)
addtimer adds a timer to the current P. This should only be called with a newly created timer. That avoids the risk of changing the when field of a timer in some P's heap, which could cause the heap to become unsorted.
func adjustSignalStack ¶
func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool
adjustSignalStack adjusts the current stack guard based on the stack pointer that is actually in use while handling a signal. We do this in case some non-Go code called sigaltstack. This reports whether the stack was adjusted, and if so stores the old signal stack in *gsigstack.
func adjustctxt ¶
func adjustctxt(gp *g, adjinfo *adjustinfo)
func adjustdefers ¶
func adjustdefers(gp *g, adjinfo *adjustinfo)
func adjustframe ¶
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool
Note: the argument/return area is adjusted by the callee.
func adjustpanics ¶
func adjustpanics(gp *g, adjinfo *adjustinfo)
func adjustpointer ¶
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer)
Adjustpointer checks whether *vpp is in the old stack described by adjinfo. If so, it rewrites *vpp to point into the new stack.
func adjustpointers ¶
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo)
bv describes the memory starting at address scanp. Adjust any pointers contained therein.
func adjustsudogs ¶
func adjustsudogs(gp *g, adjinfo *adjustinfo)
func adjusttimers ¶
func adjusttimers(pp *p)
adjusttimers looks through the timers in the current P's heap for any timers that have been modified to run earlier, and puts them in the correct place in the heap. While looking for those timers, it also moves timers that have been modified to run later, and removes deleted timers. The caller must have locked the timers for pp.
func advanceEvacuationMark ¶
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr)
func afterfork ¶
func afterfork()
func alginit ¶
func alginit()
func alignDown ¶
func alignDown(n, a uintptr) uintptr
alignDown rounds n down to a multiple of a. a must be a power of 2.
func alignUp ¶
func alignUp(n, a uintptr) uintptr
alignUp rounds n up to a multiple of a. a must be a power of 2.
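These are unexported helpers, but the behavior they describe is the usual power-of-two rounding trick, sketched here for illustration (the runtime's own definitions may differ in detail):

	package main

	import "fmt"

	// alignDown rounds n down to a multiple of a; a must be a power of 2.
	func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

	// alignUp rounds n up to a multiple of a; a must be a power of 2.
	func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

	func main() {
		fmt.Println(alignDown(4097, 4096), alignUp(4097, 4096)) // 4096 8192
	}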
func allgadd ¶
func allgadd(gp *g)
func appendIntStr ¶
func appendIntStr(b []byte, v int64, signed bool) []byte
func archauxv ¶
func archauxv(tag, val uintptr)
func arenaBase ¶
func arenaBase(i arenaIdx) uintptr
arenaBase returns the low address of the region covered by heap arena i.
func args ¶
func args(c int32, v **byte)
func argv_index ¶
func argv_index(argv **byte, i int32) *byte
nosplit for use in linux startup sysargs
func asmcgocall ¶
func asmcgocall(fn, arg unsafe.Pointer) int32
func asminit ¶
func asminit()
func assertE2I2 ¶
func assertE2I2(inter *interfacetype, e eface) (r iface, b bool)
func assertI2I2 ¶
func assertI2I2(inter *interfacetype, i iface) (r iface, b bool)
func asyncPreempt ¶
func asyncPreempt()
asyncPreempt saves all user registers and calls asyncPreempt2.
When stack scanning encounters an asyncPreempt frame, it scans that frame and its parent frame conservatively.
asyncPreempt is implemented in assembly.
func asyncPreempt2 ¶
func asyncPreempt2()
func atoi ¶
func atoi(s string) (int, bool)
atoi parses an int from a string s. The bool result reports whether s is a number representable by a value of type int.
func atoi32 ¶
func atoi32(s string) (int32, bool)
atoi32 is like atoi but for integers that fit into an int32.
func atomicstorep ¶
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer)
atomicstorep performs *ptr = new atomically and invokes a write barrier.
func atomicwb ¶
func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer)
atomicwb performs a write barrier before an atomic pointer write. The caller should guard the call with "if writeBarrier.enabled".
func badPointer ¶
func badPointer(s *mspan, p, refBase, refOff uintptr)
badPointer throws bad pointer in heap panic.
func badTimer ¶
func badTimer()
badTimer is called if the timer data structures have been corrupted, presumably due to racy use by the program. We panic here rather than panicking due to invalid slice access while holding locks. See issue #25686.
func badcgocallback ¶
func badcgocallback()
called from assembly
func badctxt ¶
func badctxt()
func badmcall ¶
func badmcall(fn func(*g))
called from assembly
func badmcall2 ¶
func badmcall2(fn func(*g))
func badmorestackg0 ¶
func badmorestackg0()
func badmorestackgsignal ¶
func badmorestackgsignal()
func badreflectcall ¶
func badreflectcall()
func badsignal ¶
func badsignal(sig uintptr, c *sigctxt)
This runs on a foreign stack, without an m or a g. No stack split.
func badsystemstack ¶
func badsystemstack()
func badunlockosthread ¶
func badunlockosthread()
func beforeIdle ¶
func beforeIdle(int64) (*g, bool)
func beforefork ¶
func beforefork()
func bgscavenge ¶
func bgscavenge(c chan int)
Background scavenger.
The background scavenger maintains the RSS of the application below the line described by the proportional scavenging statistics in the mheap struct.
func bgsweep ¶
func bgsweep(c chan int)
func binarySearchTree ¶
func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int)
Build a binary search tree with the n objects in the list x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ... Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx]. (The first object that was not included in the binary search tree.) If n == 0, returns nil, x.
func block ¶
func block()
func blockAlignSummaryRange ¶
func blockAlignSummaryRange(level int, lo, hi int) (int, int)
blockAlignSummaryRange aligns indices into the given level to that level's block width (1 << levelBits[level]). It assumes lo is inclusive and hi is exclusive, and so aligns them down and up respectively.
func blockableSig ¶
func blockableSig(sig uint32) bool
blockableSig reports whether sig may be blocked by the signal mask. We never want to block the signals marked _SigUnblock; these are the synchronous signals that turn into a Go panic. In a Go program--not a c-archive/c-shared--we never want to block the signals marked _SigKill or _SigThrow, as otherwise it's possible for all running threads to block them and delay their delivery until we start a new thread. When linked into a C program we let the C code decide on the disposition of those signals.
func blockevent ¶
func blockevent(cycles int64, skip int)
func blocksampled ¶
func blocksampled(cycles int64) bool
func bool2int ¶
func bool2int(x bool) int
bool2int returns 0 if x is false or 1 if x is true.
func breakpoint ¶
func breakpoint()
func bucketEvacuated ¶
func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool
func bucketMask ¶
func bucketMask(b uint8) uintptr
bucketMask returns 1<<b - 1, optimized for code generation.
func bucketShift ¶
func bucketShift(b uint8) uintptr
bucketShift returns 1<<b, optimized for code generation.
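Both helpers are unexported; straightforward equivalents of the documented formulas are sketched below (the runtime's own versions are written to coax better code out of the compiler):

	package main

	import "fmt"

	func bucketShift(b uint8) uintptr { return uintptr(1) << b }
	func bucketMask(b uint8) uintptr  { return bucketShift(b) - 1 }

	func main() {
		fmt.Println(bucketShift(3), bucketMask(3)) // 8 7
	}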
func bulkBarrierBitmap ¶
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8)
bulkBarrierBitmap executes write barriers for copying from [src, src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is assumed to start maskOffset bytes into the data covered by the bitmap in bits (which may not be a multiple of 8).
This is used by bulkBarrierPreWrite for writes to data and BSS.
func bulkBarrierPreWrite ¶
func bulkBarrierPreWrite(dst, src, size uintptr)
bulkBarrierPreWrite executes a write barrier for every pointer slot in the memory range [src, src+size), using pointer/scalar information from [dst, dst+size). This executes the write barriers necessary before a memmove. src, dst, and size must be pointer-aligned. The range [dst, dst+size) must lie within a single object. It does not perform the actual writes.
As a special case, src == 0 indicates that this is being used for a memclr. bulkBarrierPreWrite will pass 0 for the src of each write barrier.
Callers should call bulkBarrierPreWrite immediately before calling memmove(dst, src, size). This function is marked nosplit to avoid being preempted; the GC must not stop the goroutine between the memmove and the execution of the barriers. The caller is also responsible for cgo pointer checks if this may be writing Go pointers into non-Go memory.
The pointer bitmap is not maintained for allocations containing no pointers at all; any caller of bulkBarrierPreWrite must first make sure the underlying allocation contains pointers, usually by checking typ.ptrdata.
Callers must perform cgo checks if writeBarrier.cgo.
func bulkBarrierPreWriteSrcOnly ¶
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr)
bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but does not execute write barriers for [dst, dst+size).
In addition to the requirements of bulkBarrierPreWrite callers need to ensure [dst, dst+size) is zeroed.
This is used for special cases where e.g. dst was just created and zeroed with malloc.
func bytes ¶
func bytes(s string) (ret []byte)
func bytesHash ¶
func bytesHash(b []byte, seed uintptr) uintptr
func c128equal ¶
func c128equal(p, q unsafe.Pointer) bool
func c128hash ¶
func c128hash(p unsafe.Pointer, h uintptr) uintptr
func c64equal ¶
func c64equal(p, q unsafe.Pointer) bool
func c64hash ¶
func c64hash(p unsafe.Pointer, h uintptr) uintptr
func cachestats ¶
func cachestats()
cachestats flushes all mcache stats.
The world must be stopped.
func call1024 ¶
func call1024(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call1048576 ¶
func call1048576(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call1073741824 ¶
func call1073741824(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call128 ¶
func call128(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call131072 ¶
func call131072(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call134217728 ¶
func call134217728(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call16384 ¶
func call16384(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call16777216 ¶
func call16777216(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call2048 ¶
func call2048(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call2097152 ¶
func call2097152(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call256 ¶
func call256(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call262144 ¶
func call262144(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call268435456 ¶
func call268435456(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call32 ¶
func call32(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
in asm_*.s; not called directly. Definitions here supply type information for traceback.
func call32768 ¶
func call32768(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call33554432 ¶
func call33554432(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call4096 ¶
func call4096(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call4194304 ¶
func call4194304(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call512 ¶
func call512(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call524288 ¶
func call524288(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call536870912 ¶
func call536870912(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call64 ¶
func call64(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call65536 ¶
func call65536(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call67108864 ¶
func call67108864(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call8192 ¶
func call8192(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func call8388608 ¶
func call8388608(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
func callCgoMmap ¶
func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr
callCgoMmap calls the mmap function in the runtime/cgo package using the GCC calling convention. It is implemented in assembly.
func callCgoMunmap ¶
func callCgoMunmap(addr unsafe.Pointer, n uintptr)
callCgoMunmap calls the munmap function in the runtime/cgo package using the GCC calling convention. It is implemented in assembly.
func callCgoSigaction ¶
func callCgoSigaction(sig uintptr, new, old *sigactiont) int32
callCgoSigaction calls the sigaction function in the runtime/cgo package using the GCC calling convention. It is implemented in assembly.
func callCgoSymbolizer ¶
func callCgoSymbolizer(arg *cgoSymbolizerArg)
callCgoSymbolizer calls the cgoSymbolizer function.
func callers ¶
func callers(skip int, pcbuf []uintptr) int
func canPreemptM ¶
func canPreemptM(mp *m) bool
canPreemptM reports whether mp is in a state that is safe to preempt.
It is nosplit because it has nosplit callers.
func canpanic ¶
func canpanic(gp *g) bool
canpanic returns false if a signal should throw instead of panicking.
func cansemacquire ¶
func cansemacquire(addr *uint32) bool
func casGFromPreempted ¶
func casGFromPreempted(gp *g, old, new uint32) bool
casGFromPreempted attempts to transition gp from _Gpreempted to _Gwaiting. If successful, the caller is responsible for re-scheduling gp.
func casGToPreemptScan ¶
func casGToPreemptScan(gp *g, old, new uint32)
casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
TODO(austin): This is the only status operation that both changes the status and locks the _Gscan bit. Rethink this.
func casfrom_Gscanstatus ¶
func casfrom_Gscanstatus(gp *g, oldval, newval uint32)
The Gscanstatuses are acting like locks and this releases them. If it proves to be a performance hit we should be able to make these simple atomic stores but for now we are going to throw if we see an inconsistent state.
func casgcopystack ¶
func casgcopystack(gp *g) uint32
casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. Returns old status. Cannot call casgstatus directly, because we are racing with an async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, it might have become Grunnable by the time we get to the cas. If we called casgstatus, it would loop waiting for the status to go back to Gwaiting, which it never will.
func casgstatus ¶
func casgstatus(gp *g, oldval, newval uint32)
If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus and casfrom_Gscanstatus instead. casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that put it in the Gscan state is finished.
func castogscanstatus ¶
func castogscanstatus(gp *g, oldval, newval uint32) bool
This will return false if the gp is not in the expected status and the cas fails. This acts like a lock acquire while the casfromgstatus acts like a lock release.
func cfuncname ¶
func cfuncname(f funcInfo) *byte
func cfuncnameFromNameoff ¶
func cfuncnameFromNameoff(f funcInfo, nameoff int32) *byte
func cgoCheckArg ¶
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string)
cgoCheckArg is the real work of cgoCheckPointer. The argument p is either a pointer to the value (of type t), or the value itself, depending on indir. The top parameter is whether we are at the top level, where Go pointers are allowed.
func cgoCheckBits ¶
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr)
cgoCheckBits checks the block of memory at src, for up to size bytes, and throws if it finds a Go pointer. The gcbits mark each pointer value. The src pointer is off bytes into the gcbits.
func cgoCheckMemmove ¶
func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr)
cgoCheckMemmove is called when moving a block of memory. dst and src point off bytes into the value to copy. size is the number of bytes to copy. It throws if the program is copying a block that contains a Go pointer into non-Go memory.
func cgoCheckPointer ¶
func cgoCheckPointer(ptr interface{}, arg interface{})
cgoCheckPointer checks if the argument contains a Go pointer that points to a Go pointer, and panics if it does.
func cgoCheckResult ¶
func cgoCheckResult(val interface{})
cgoCheckResult is called to check the result parameter of an exported Go function. It panics if the result is or contains a Go pointer.
func cgoCheckSliceCopy ¶
func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int)
cgoCheckSliceCopy is called when copying n elements of a slice. src and dst are pointers to the first element of the slice. typ is the element type of the slice. It throws if the program is copying slice elements that contain Go pointers into non-Go memory.
func cgoCheckTypedBlock ¶
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr)
cgoCheckTypedBlock checks the block of memory at src, for up to size bytes, and throws if it finds a Go pointer. The type of the memory is typ, and src is off bytes into that type.
func cgoCheckUnknownPointer ¶
func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr)
cgoCheckUnknownPointer is called for an arbitrary pointer into Go memory. It checks whether that Go memory contains any other pointer into Go memory. If it does, we panic. The return values are unused but useful to see in panic tracebacks.
func cgoCheckUsingType ¶
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr)
cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch fall back to look for pointers in src using the type information. We only use this when looking at a value on the stack when the type uses a GC program, because otherwise it's more efficient to use the GC bits. This is called on the system stack.
func cgoCheckWriteBarrier ¶
func cgoCheckWriteBarrier(dst *uintptr, src uintptr)
cgoCheckWriteBarrier is called whenever a pointer is stored into memory. It throws if the program is storing a Go pointer into non-Go memory.
This is called from the write barrier, so its entire call tree must be nosplit.
func cgoContextPCs ¶
func cgoContextPCs(ctxt uintptr, buf []uintptr)
cgoContextPCs gets the PC values from a cgo traceback.
func cgoInRange ¶
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool
cgoInRange reports whether p is between start and end.
func cgoIsGoPointer ¶
func cgoIsGoPointer(p unsafe.Pointer) bool
cgoIsGoPointer reports whether the pointer is a Go pointer--a pointer to Go memory. We only care about Go memory that might contain pointers.
func cgoSigtramp ¶
func cgoSigtramp()
func cgoUse ¶
func cgoUse(interface{})
cgoUse is called by cgo-generated code (using go:linkname to get at an unexported name). The calls serve two purposes: 1) they are opaque to escape analysis, so the argument is considered to escape to the heap. 2) they keep the argument alive until the call site; the call is emitted after the end of the (presumed) use of the argument by C. cgoUse should not actually be called (see cgoAlwaysFalse).
func cgocall ¶
func cgocall(fn, arg unsafe.Pointer) int32
Call from Go to C.
This must be nosplit because it's used for syscalls on some platforms. Syscalls may have untyped arguments on the stack, so it's not safe to grow or scan the stack.
func cgocallback ¶
func cgocallback(fn, frame unsafe.Pointer, framesize, ctxt uintptr)
func cgocallback_gofunc ¶
func cgocallback_gofunc(fv, frame, framesize, ctxt uintptr)
Not all cgocallback_gofunc frames are actually cgocallback_gofunc, so not all have these arguments. Mark them uintptr so that the GC does not misinterpret memory when the arguments are not present. cgocallback_gofunc is not called from go, only from cgocallback, so the arguments will be found via cgocallback's pointer-declared arguments. See the assembly implementations for more details.
func cgocallbackg ¶
func cgocallbackg(ctxt uintptr)
Call from C back to Go.
func cgocallbackg1 ¶
func cgocallbackg1(ctxt uintptr)
func cgounimpl ¶
func cgounimpl()
called from (incomplete) assembly
func chanbuf ¶
func chanbuf(c *hchan, i uint) unsafe.Pointer
chanbuf(c, i) is a pointer to the i'th slot in the buffer.
func chanparkcommit ¶
func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool
func chanrecv ¶
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
chanrecv receives on channel c and writes the received data to ep. ep may be nil, in which case received data is ignored. If block == false and no elements are available, returns (false, false). Otherwise, if c is closed, zeros *ep and returns (true, false). Otherwise, fills in *ep with an element and returns (true, true). A non-nil ep must point to the heap or the caller's stack.
func chanrecv1 ¶
func chanrecv1(c *hchan, elem unsafe.Pointer)
entry points for <- c from compiled code
func chanrecv2 ¶
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool)
func chansend ¶
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool
Generic single channel send/recv. If block is false, the protocol will not sleep but return if it could not complete. Sleep can wake up with g.param == nil when a channel involved in the sleep has been closed. It is easiest to loop and re-run the operation; we'll see that it's now closed.
func chansend1 ¶
func chansend1(c *hchan, elem unsafe.Pointer)
entry point for c <- x from compiled code
func check ¶
func check()
func checkASM ¶
func checkASM() bool
checkASM reports whether assembly runtime checks have passed.
func checkTimeouts ¶
func checkTimeouts()
func checkTimers ¶
func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool)
checkTimers runs any timers for the P that are ready. If now is not 0 it is the current time. It returns the current time or 0 if it is not known, and the time when the next timer should run or 0 if there is no next timer, and reports whether it ran any timers. If the time when the next timer should run is not 0, it is always larger than the returned time. We pass now in and out to avoid extra calls of nanotime.
func checkdead ¶
func checkdead()
Check for deadlock situation. The check is based on number of running M's, if 0 -> deadlock. sched.lock must be held.
func checkmcount ¶
func checkmcount()
func checkptrAlignment ¶
func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr)
func checkptrArithmetic ¶
func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer)
func checkptrBase ¶
func checkptrBase(p unsafe.Pointer) uintptr
checkptrBase returns the base address for the allocation containing the address p.
Importantly, if p1 and p2 point into the same variable, then checkptrBase(p1) == checkptrBase(p2). However, the converse/inverse is not necessarily true as allocations can have trailing padding, and multiple variables may be packed into a single allocation.
func chunkBase ¶
func chunkBase(ci chunkIdx) uintptr
chunkBase returns the base address of the palloc chunk at index ci.
func chunkPageIndex ¶
func chunkPageIndex(p uintptr) uint
chunkPageIndex computes the index of the page that contains p, relative to the chunk which contains p.
func cleantimers ¶
func cleantimers(pp *p)
cleantimers cleans up the head of the timer queue. This speeds up programs that create and delete timers; leaving them in the heap slows down addtimer. The caller must have locked the timers for pp.
func clearCheckmarks ¶
func clearCheckmarks()
func clearDeletedTimers ¶
func clearDeletedTimers(pp *p)
clearDeletedTimers removes all deleted timers from the P's timer heap. This is used to avoid clogging up the heap if the program starts a lot of long-running timers and then stops them. For example, this can happen via context.WithTimeout.
This is the only function that walks through the entire timer heap, other than moveTimers which only runs when the world is stopped.
The caller must have locked the timers for pp.
func clearSignalHandlers ¶
func clearSignalHandlers()
clearSignalHandlers clears all signal handlers that are not ignored back to the default. This is called by the child after a fork, so that we can enable the signal mask for the exec without worrying about running a signal handler in the child.
func clearpools ¶
func clearpools()
func clobberfree ¶
func clobberfree(x unsafe.Pointer, size uintptr)
clobberfree sets the memory content at x to bad content, for debugging purposes.
func clone ¶
func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
func closechan ¶
func closechan(c *hchan)
func closefd ¶
func closefd(fd int32) int32
func closeonexec ¶
func closeonexec(fd int32)
func complex128div ¶
func complex128div(n complex128, m complex128) complex128
func concatstring2 ¶
func concatstring2(buf *tmpBuf, a [2]string) string
func concatstring3 ¶
func concatstring3(buf *tmpBuf, a [3]string) string
func concatstring4 ¶
func concatstring4(buf *tmpBuf, a [4]string) string
func concatstring5 ¶
func concatstring5(buf *tmpBuf, a [5]string) string
func concatstrings ¶
func concatstrings(buf *tmpBuf, a []string) string
concatstrings implements a Go string concatenation x+y+z+... The operands are passed in the slice a. If buf != nil, the compiler has determined that the result does not escape the calling function, so the string data can be stored in buf if small enough.
func connect ¶
func connect(fd int32, addr unsafe.Pointer, len int32) int32
func contains ¶
func contains(s, t string) bool
func convT16 ¶
func convT16(val uint16) (x unsafe.Pointer)
func convT32 ¶
func convT32(val uint32) (x unsafe.Pointer)
func convT64 ¶
func convT64(val uint64) (x unsafe.Pointer)
func convTslice ¶
func convTslice(val []byte) (x unsafe.Pointer)
func convTstring ¶
func convTstring(val string) (x unsafe.Pointer)
func copysign ¶
func copysign(x, y float64) float64
copysign returns a value with the magnitude of x and the sign of y.
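copysign is unexported; the sketch below shows the standard IEEE 754 bit-manipulation approach the description implies (magnitude bits from x, sign bit from y), not necessarily the runtime's exact code.

	package main

	import (
		"fmt"
		"math"
	)

	func copysign(x, y float64) float64 {
		const signBit = 1 << 63
		return math.Float64frombits(math.Float64bits(x)&^signBit | math.Float64bits(y)&signBit)
	}

	func main() {
		fmt.Println(copysign(3.5, -1)) // -3.5
	}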
func copystack ¶
func copystack(gp *g, newsize uintptr)
Copies gp's stack to a new stack of a different size. Caller must have changed gp status to Gcopystack.
func countSub ¶
func countSub(x, y uint32) int
countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount, assuming that they are no more than 2^29 apart (guaranteed since they are never more than len(data) or len(tags) apart, respectively). tagCount wraps at 2^30, while dataCount wraps at 2^32. This function works for both.
func countrunes ¶
func countrunes(s string) int
countrunes returns the number of runes in s.
func cpuinit ¶
func cpuinit()
cpuinit extracts the environment variable GODEBUG from the environment on Unix-like operating systems and calls internal/cpu.Initialize.
func cputicks ¶
func cputicks() int64
careful: cputicks is not guaranteed to be monotonic! In particular, we have noticed drift between cpus on certain os/arch combinations. See issue 8976.
func crash ¶
func crash()
func createfing ¶
func createfing()
func cstring ¶
func cstring(s string) unsafe.Pointer
func debugCallCheck ¶
func debugCallCheck(pc uintptr) string
debugCallCheck checks whether it is safe to inject a debugger function call with return PC pc. If not, it returns a string explaining why.
func debugCallPanicked ¶
func debugCallPanicked(val interface{})
func debugCallV1 ¶
func debugCallV1()
func debugCallWrap ¶
func debugCallWrap(dispatch uintptr)
debugCallWrap starts a new goroutine to run a debug call and blocks the calling goroutine. On the goroutine, it prepares to recover panics from the debug call, and then calls the call dispatching function at PC dispatch.
This must be deeply nosplit because there are untyped values on the stack from debugCallV1.
func debugCallWrap1 ¶
func debugCallWrap1(dispatch uintptr, callingG *g)
debugCallWrap1 is the continuation of debugCallWrap on the callee goroutine.
func debugCallWrap2 ¶
func debugCallWrap2(dispatch uintptr)
func debug_modinfo ¶
func debug_modinfo() string
func decoderune ¶
func decoderune(s string, k int) (r rune, pos int)
decoderune returns the non-ASCII rune at the start of s[k:] and the index after the rune in s.
decoderune assumes that caller has checked that the to be decoded rune is a non-ASCII rune.
If the string appears to be incomplete or decoding problems are encountered, (runeerror, k + 1) is returned to ensure progress when decoderune is used to iterate over a string.
func deductSweepCredit ¶
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr)
deductSweepCredit deducts sweep credit for allocating a span of size spanBytes. This must be performed *before* the span is allocated to ensure the system has enough credit. If necessary, it performs sweeping to prevent going in to debt. If the caller will also sweep pages (e.g., for a large allocation), it can pass a non-zero callerSweepPages to leave that many pages unswept.
deductSweepCredit makes a worst-case assumption that all spanBytes bytes of the ultimately allocated span will be available for object allocation.
deductSweepCredit is the core of the "proportional sweep" system. It uses statistics gathered by the garbage collector to perform enough sweeping so that all pages are swept during the concurrent sweep phase between GC cycles.
mheap_ must NOT be locked.
func deferArgs ¶
func deferArgs(d *_defer) unsafe.Pointer
The arguments associated with a deferred call are stored immediately after the _defer header in memory.
func deferclass ¶
func deferclass(siz uintptr) uintptr
defer size class for arg size sz
func deferproc ¶
func deferproc(siz int32, fn *funcval)
Create a new deferred function fn with siz bytes of arguments. The compiler turns a defer statement into a call to this.
func deferprocStack ¶
func deferprocStack(d *_defer)
deferprocStack queues a new deferred function with a defer record on the stack. The defer record must have its siz and fn fields initialized. All other fields can contain junk. The defer record must be immediately followed in memory by the arguments of the defer. Nosplit because the arguments on the stack won't be scanned until the defer record is spliced into the gp._defer list.
func deferreturn ¶
func deferreturn(arg0 uintptr)
Run a deferred function if there is one. The compiler inserts a call to this at the end of any function which calls defer. If there is a deferred function, this will call runtime·jmpdefer, which will jump to the deferred function such that it appears to have been called by the caller of deferreturn at the point just before deferreturn was called. The effect is that deferreturn is called again and again until there are no more deferred functions.
Declared as nosplit, because the function should not be preempted once we start modifying the caller's frame in order to reuse the frame to call the deferred function.
The single argument isn't actually used - it just has its address taken so it can be matched against pending defers.
func deltimer ¶
func deltimer(t *timer) bool
deltimer deletes the timer t. It may be on some other P, so we can't actually remove it from the timers heap. We can only mark it as deleted. It will be removed in due course by the P whose heap it is on. Reports whether the timer was removed before it was run.
func dematerializeGCProg ¶
func dematerializeGCProg(s *mspan)
func dieFromSignal ¶
func dieFromSignal(sig uint32)
dieFromSignal kills the program with a signal. This provides the expected exit status for the shell. This is only called with fatal signals expected to kill the process.
func divRoundUp ¶
func divRoundUp(n, a uintptr) uintptr
divRoundUp returns ceil(n / a).
func divlu ¶
func divlu(u1, u0, v uint64) (q, r uint64)
128/64 -> 64 quotient, 64 remainder. Adapted from Hacker's Delight.
func doInit ¶
func doInit(t *initTask)
func doSigPreempt ¶
func doSigPreempt(gp *g, ctxt *sigctxt)
doSigPreempt handles a preemption signal on gp.
func doaddtimer ¶
func doaddtimer(pp *p, t *timer)
doaddtimer adds t to the current P's heap. The caller must have locked the timers for pp.
func dodeltimer ¶
func dodeltimer(pp *p, i int)
dodeltimer removes timer i from the current P's heap. We are locked on the P when this is called. The caller must have locked the timers for pp.
func dodeltimer0 ¶
func dodeltimer0(pp *p)
dodeltimer0 removes timer 0 from the current P's heap. We are locked on the P when this is called. The caller must have locked the timers for pp.
func dolockOSThread ¶
func dolockOSThread()
dolockOSThread is called by LockOSThread and lockOSThread below after they modify m.locked. Do not allow preemption during this call, or else the m might be different in this function than in the caller.
func dopanic_m ¶
func dopanic_m(gp *g, pc, sp uintptr) bool
func dounlockOSThread ¶
func dounlockOSThread()
dounlockOSThread is called by UnlockOSThread and unlockOSThread below after they update m->locked. Do not allow preemption during this call, or else the m might be different in this function than in the caller.
func dropg ¶
func dropg()
dropg removes the association between m and the current goroutine m->curg (gp for short). Typically a caller sets gp's status away from Grunning and then immediately calls dropg to finish the job. The caller is also responsible for arranging that gp will be restarted using ready at an appropriate time. After calling dropg and arranging for gp to be readied later, the caller can do other work but eventually should call schedule to restart the scheduling of goroutines on this m.
func dropm ¶
func dropm()
dropm is called when a cgo callback has called needm but is now done with the callback and returning back into the non-Go thread. It puts the current m back onto the extra list.
The main expense here is the call to signalstack to release the m's signal stack, and then the call to needm on the next callback from this thread. It is tempting to try to save the m for next time, which would eliminate both these costs, but there might not be a next time: the current thread (which Go does not control) might exit. If we saved the m for that thread, there would be an m leak each time such a thread exited. Instead, we acquire and release an m on each call. These should typically not be scheduling operations, just a few atomics, so the cost should be small.
TODO(rsc): An alternative would be to allocate a dummy pthread per-thread variable using pthread_key_create. Unlike the pthread keys we already use on OS X, this dummy key would never be read by Go code. It would exist only so that we could register a thread-exit-time destructor. That destructor would put the m back onto the extra list. This is purely a performance optimization. The current version, in which dropm happens on each cgo call, is still correct too. We may have to keep the current version on systems with cgo but without pthreads, like Windows.
func duffcopy ¶
func duffcopy()
func duffzero ¶
func duffzero()
func dumpGCProg ¶
func dumpGCProg(p *byte)
func dumpbool ¶
func dumpbool(b bool)
func dumpbv ¶
func dumpbv(cbv *bitvector, offset uintptr)
dump kinds & offsets of interesting fields in bv
func dumpfields ¶
func dumpfields(bv bitvector)
dumpint() the kind & offset of each field in an object.
func dumpfinalizer ¶
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype)
func dumpframe ¶
func dumpframe(s *stkframe, arg unsafe.Pointer) bool
func dumpgoroutine ¶
func dumpgoroutine(gp *g)
func dumpgs ¶
func dumpgs()
func dumpgstatus ¶
func dumpgstatus(gp *g)
func dumpint ¶
func dumpint(v uint64)
dump a uint64 in a varint format parseable by encoding/binary
func dumpitabs ¶
func dumpitabs()
func dumpmemprof ¶
func dumpmemprof()
func dumpmemprof_callback ¶
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr)
func dumpmemrange ¶
func dumpmemrange(data unsafe.Pointer, len uintptr)
dump varint uint64 length followed by memory contents
func dumpmemstats ¶
func dumpmemstats()
func dumpms ¶
func dumpms()
func dumpobj ¶
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector)
dump an object
func dumpobjs ¶
func dumpobjs()
func dumpotherroot ¶
func dumpotherroot(description string, to unsafe.Pointer)
func dumpparams ¶
func dumpparams()
func dumpregs ¶
func dumpregs(c *sigctxt)
func dumproots ¶
func dumproots()
func dumpslice ¶
func dumpslice(b []byte)
func dumpstr ¶
func dumpstr(s string)
func dumptype ¶
func dumptype(t *_type)
dump information for a type
func dwrite ¶
func dwrite(data unsafe.Pointer, len uintptr)
func dwritebyte ¶
func dwritebyte(b byte)
func efaceHash ¶
func efaceHash(i interface{}, seed uintptr) uintptr
func efaceeq ¶
func efaceeq(t *_type, x, y unsafe.Pointer) bool
func elideWrapperCalling ¶
func elideWrapperCalling(id funcID) bool
elideWrapperCalling reports whether a wrapper function that called function id should be elided from stack traces.
func empty ¶
func empty(c *hchan) bool
empty reports whether a read from c would block (that is, the channel is empty). It uses a single atomic read of mutable state.
func encoderune ¶
func encoderune(p []byte, r rune) int
encoderune writes into p (which must be large enough) the UTF-8 encoding of the rune. It returns the number of bytes written.
func ensureSigM ¶
func ensureSigM()
ensureSigM starts one global, sleeping thread to make sure at least one thread is available to catch signals enabled for os/signal.
func entersyscall ¶
func entersyscall()
Standard syscall entry used by the go syscall library and normal cgo calls.
This is exported via linkname to assembly in the syscall package.
func entersyscall_gcwait ¶
func entersyscall_gcwait()
func entersyscall_sysmon ¶
func entersyscall_sysmon()
func entersyscallblock ¶
func entersyscallblock()
The same as entersyscall(), but with a hint that the syscall is blocking.
func entersyscallblock_handoff ¶
func entersyscallblock_handoff()
func envKeyEqual ¶
func envKeyEqual(a, b string) bool
envKeyEqual reports whether a == b, with ASCII-only case insensitivity on Windows. The two strings must have the same length.
func environ ¶
func environ() []string
func epollcreate ¶
func epollcreate(size int32) int32
func epollcreate1 ¶
func epollcreate1(flags int32) int32
func epollctl ¶
func epollctl(epfd, op, fd int32, ev *epollevent) int32
func epollwait ¶
func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
func eqslice ¶
func eqslice(x, y []uintptr) bool
func evacuate ¶
func evacuate(t *maptype, h *hmap, oldbucket uintptr)
func evacuate_fast32 ¶
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr)
func evacuate_fast64 ¶
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr)
func evacuate_faststr ¶
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr)
func evacuated ¶
func evacuated(b *bmap) bool
func execute ¶
func execute(gp *g, inheritTime bool)
Schedules gp to run on the current M. If inheritTime is true, gp inherits the remaining time in the current time slice. Otherwise, it starts a new time slice. Never returns.
Write barriers are allowed because this is called immediately after acquiring a P in several places.
func exit ¶
func exit(code int32)
func exitThread ¶
func exitThread(wait *uint32)
exitThread terminates the current thread, writing *wait = 0 when the stack is safe to reclaim.
func exitsyscall ¶
func exitsyscall()
The goroutine g exited its system call. Arrange for it to run on a cpu again. This is called only from the go syscall library, not from the low-level system calls used by the runtime.
Write barriers are not allowed because our P may have been stolen.
This is exported via linkname to assembly in the syscall package.
func exitsyscall0 ¶
func exitsyscall0(gp *g)
exitsyscall slow path on g0. Failed to acquire P, enqueue gp as runnable.
func exitsyscallfast ¶
func exitsyscallfast(oldp *p) bool
func exitsyscallfast_pidle ¶
func exitsyscallfast_pidle() bool
func exitsyscallfast_reacquired ¶
func exitsyscallfast_reacquired()
exitsyscallfast_reacquired is the exitsyscall path on which this G has successfully reacquired the P it was running on before the syscall.
func extendRandom ¶
func extendRandom(r []byte, n int)
extendRandom extends the random numbers in r[:n] to the whole slice r. Treats n<0 as n==0.
func f32equal ¶
func f32equal(p, q unsafe.Pointer) bool
func f32hash ¶
func f32hash(p unsafe.Pointer, h uintptr) uintptr
func f32to64 ¶
func f32to64(f uint32) uint64
func f32toint32 ¶
func f32toint32(x uint32) int32
func f32toint64 ¶
func f32toint64(x uint32) int64
func f32touint64 ¶
func f32touint64(x float32) uint64
func f64equal ¶
func f64equal(p, q unsafe.Pointer) bool
func f64hash ¶
func f64hash(p unsafe.Pointer, h uintptr) uintptr
func f64to32 ¶
func f64to32(f uint64) uint32
func f64toint ¶
func f64toint(f uint64) (val int64, ok bool)
func f64toint32 ¶
func f64toint32(x uint64) int32
func f64toint64 ¶
func f64toint64(x uint64) int64
func f64touint64 ¶
func f64touint64(x float64) uint64
func fadd32 ¶
func fadd32(x, y uint32) uint32
func fadd64 ¶
func fadd64(f, g uint64) uint64
func fastexprand ¶
func fastexprand(mean int) int32
fastexprand returns a random number from an exponential distribution with the specified mean.
func fastlog2 ¶
func fastlog2(x float64) float64
fastlog2 implements a fast approximation to the base 2 log of a float64. This is used to compute a geometric distribution for heap sampling, without introducing dependencies into package math. This uses a very rough approximation using the float64 exponent and the first 25 bits of the mantissa. The top 5 bits of the mantissa are used to load limits from a table of constants and the rest are used to scale linearly between them.
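A much cruder sketch of the same idea, exponent plus a linear stand-in for the mantissa contribution, is shown below; it is not the table-driven version the runtime uses, just an illustration of why the bit layout makes this cheap.

	package main

	import (
		"fmt"
		"math"
	)

	// roughLog2 approximates log2(x) for positive, normal x: take the IEEE 754
	// exponent and treat the mantissa fraction as a linear approximation of
	// log2(1+frac). Maximum error is a little under 0.09.
	func roughLog2(x float64) float64 {
		bits := math.Float64bits(x)
		exp := float64(int((bits>>52)&0x7ff)) - 1023
		frac := float64(bits&(1<<52-1)) / (1 << 52) // mantissa fraction in [0, 1)
		return exp + frac
	}

	func main() {
		fmt.Println(roughLog2(10), math.Log2(10)) // ~3.25 vs ~3.32
	}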
func fastrand ¶
func fastrand() uint32
func fastrandinit ¶
func fastrandinit()
func fastrandn ¶
func fastrandn(n uint32) uint32
func fatalpanic ¶
func fatalpanic(msgs *_panic)
fatalpanic implements an unrecoverable panic. It is like fatalthrow, except that if msgs != nil, fatalpanic also prints panic messages and decrements runningPanicDefers once main is blocked from exiting.
func fatalthrow ¶
func fatalthrow()
fatalthrow implements an unrecoverable runtime throw. It freezes the system, prints stack traces starting from its caller, and terminates the process.
func fcmp64 ¶
func fcmp64(f, g uint64) (cmp int32, isnan bool)
func fdiv32 ¶
func fdiv32(x, y uint32) uint32
func fdiv64 ¶
func fdiv64(f, g uint64) uint64
func feq32 ¶
func feq32(x, y uint32) bool
func feq64 ¶
func feq64(x, y uint64) bool
func fge32 ¶
func fge32(x, y uint32) bool
func fge64 ¶
func fge64(x, y uint64) bool
func fgt32 ¶
func fgt32(x, y uint32) bool
func fgt64 ¶
func fgt64(x, y uint64) bool
func fillAligned ¶
func fillAligned(x uint64, m uint) uint64
fillAligned returns x, but with each m-aligned group of m bits set entirely to 1 if any bit in that group is non-zero; groups that are all zero are left as zero.
For example, fillAligned(0x0100a3, 8) == 0xff00ff.
Note that if m == 1, this is a no-op.
m must be a power of 2 <= maxPagesPerPhysPage.
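A hedged sketch that reproduces the documented behavior with a simple loop (fillAlignedSketch is a hypothetical name; the runtime uses branch-free bit tricks specialized per m):
	// fillAlignedSketch sets each m-aligned group of m bits in x entirely to 1
	// if any bit in that group is non-zero. m must be a power of two <= 64.
	// Hypothetical re-implementation for illustration; not the runtime's code.
	func fillAlignedSketch(x uint64, m uint) uint64 {
		var out uint64
		for i := uint(0); i < 64; i += m {
			mask := (uint64(1)<<m - 1) << i // m ones at offset i
			if x&mask != 0 {
				out |= mask
			}
		}
		return out
	}
fillAlignedSketch(0x0100a3, 8) == 0xff00ff, matching the example above.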
func fillstack ¶
func fillstack(stk stack, b byte)
func findBitRange64 ¶
func findBitRange64(c uint64, n uint) uint
findBitRange64 returns the bit index of the first set of n consecutive 1 bits. If no consecutive set of 1 bits of size n may be found in c, then it returns an integer >= 64.
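One way to satisfy this contract, shown as a hedged sketch (findBitRange64Sketch is a hypothetical name and may differ from the runtime's code): repeatedly AND the word with itself shifted right, so that after n-1 rounds a set bit marks the start of a run of n ones.
	import "math/bits"

	// findBitRange64Sketch returns the bit index of the first run of n
	// consecutive 1 bits in c, or a value >= 64 if there is none.
	// Hypothetical re-implementation for illustration.
	func findBitRange64Sketch(c uint64, n uint) uint {
		for i := uint(1); i < n; i++ {
			c &= c >> 1 // bit i stays set only if a run starting at i survives
		}
		if c == 0 {
			return 64
		}
		return uint(bits.TrailingZeros64(c))
	}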
func findObject ¶
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr)
findObject returns the base address for the heap object containing the address p, the object's span, and the index of the object in s. If p does not point into a heap object, it returns base == 0.
If p is an invalid heap pointer and debug.invalidptr != 0, findObject panics.
refBase and refOff optionally give the base address of the object in which the pointer p was found and the byte offset at which it was found. These are used for error reporting.
It is nosplit so it is safe for p to be a pointer to the current goroutine's stack. Since p is a uintptr, it would not be adjusted if the stack were to move.
func findnull ¶
func findnull(s *byte) int
func findnullw ¶
func findnullw(s *uint16) int
func findrunnable ¶
func findrunnable() (gp *g, inheritTime bool)
Finds a runnable goroutine to execute. Tries to steal from other P's, get g from local or global queue, poll network.
func findsghi ¶
func findsghi(gp *g, stk stack) uintptr
func finishsweep_m ¶
func finishsweep_m()
finishsweep_m ensures that all spans are swept.
The world must be stopped. This ensures there are no sweeps in progress.
func finq_callback ¶
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype)
func fint32to32 ¶
func fint32to32(x int32) uint32
func fint32to64 ¶
func fint32to64(x int32) uint64
func fint64to32 ¶
func fint64to32(x int64) uint32
func fint64to64 ¶
func fint64to64(x int64) uint64
func fintto64 ¶
func fintto64(val int64) (f uint64)
func float64bits ¶
func float64bits(f float64) uint64
Float64bits returns the IEEE 754 binary representation of f.
func float64frombits ¶
func float64frombits(b uint64) float64
Float64frombits returns the floating point number corresponding to the IEEE 754 binary representation b.
func flush ¶
func flush()
func flushallmcaches ¶
func flushallmcaches()
flushallmcaches flushes the mcaches of all Ps.
The world must be stopped.
func flushmcache ¶
func flushmcache(i int)
flushmcache flushes the mcache of allp[i].
The world must be stopped.
func fmtNSAsMS ¶
func fmtNSAsMS(buf []byte, ns uint64) []byte
fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmul32 ¶
func fmul32(x, y uint32) uint32
func fmul64 ¶
func fmul64(f, g uint64) uint64
func fneg64 ¶
func fneg64(f uint64) uint64
func forEachP ¶
func forEachP(fn func(*p))
forEachP calls fn(p) for every P p when p reaches a GC safe point. If a P is currently executing code, this will bring the P to a GC safe point and execute fn on that P. If the P is not executing code (it is idle or in a syscall), this will call fn(p) directly while preventing the P from exiting its state. This does not ensure that fn will run on every CPU executing Go code, but it acts as a global memory barrier. GC uses this as a "ragged barrier."
The caller must hold worldsema.
func forcegchelper ¶
func forcegchelper()
func fpack32 ¶
func fpack32(sign, mant uint32, exp int, trunc uint32) uint32
func fpack64 ¶
func fpack64(sign, mant uint64, exp int, trunc uint64) uint64
func freeSomeWbufs ¶
func freeSomeWbufs(preemptible bool) bool
freeSomeWbufs frees some workbufs back to the heap and returns true if it should be called again to free more.
func freeStackSpans ¶
func freeStackSpans()
freeStackSpans frees unused stack spans at the end of GC.
func freedefer ¶
func freedefer(d *_defer)
Free the given defer. The defer cannot be used after this call.
This must not grow the stack because there may be a frame without a stack map when this is called.
func freedeferfn ¶
func freedeferfn()
func freedeferpanic ¶
func freedeferpanic()
Separate function so that it can split stack. Windows otherwise runs out of stack space.
func freemcache ¶
func freemcache(c *mcache)
func freespecial ¶
func freespecial(s *special, p unsafe.Pointer, size uintptr)
Do whatever cleanup needs to be done to deallocate s. It has already been unlinked from the mspan specials list.
func freezetheworld ¶
func freezetheworld()
Similar to stopTheWorld but best-effort and can be called several times. There is no reverse operation, used during crashing. This function must not lock any mutexes.
func fsub64 ¶
func fsub64(f, g uint64) uint64
func fuint64to32 ¶
func fuint64to32(x uint64) float32
func fuint64to64 ¶
func fuint64to64(x uint64) float64
func full ¶
func full(c *hchan) bool
full reports whether a send on c would block (that is, the channel is full). It uses a single word-sized read of mutable state, so although the answer is instantaneously true, the correct answer may have changed by the time the calling function receives the return value.
func funcMaxSPDelta ¶
func funcMaxSPDelta(f funcInfo) int32
funcMaxSPDelta returns the maximum spdelta at any point in f.
func funcPC ¶
func funcPC(f interface{}) uintptr
funcPC returns the entry PC of the function f. It assumes that f is a func value. Otherwise the behavior is undefined. CAREFUL: In programs with plugins, funcPC can return different values for the same function (because there are actually multiple copies of the same function in the address space). To be safe, don't use the results of this function in any == expression. It is only safe to use the result as an address at which to start executing code.
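funcPC itself is internal to the runtime. Outside the runtime, the usual way to obtain a func value's code pointer is via reflect; the same caveat about plugins and == comparisons applies. A sketch (entryPC is a hypothetical helper):
	package main

	import (
		"fmt"
		"reflect"
		"runtime"
	)

	// entryPC is a hypothetical user-level analogue: it returns the code
	// pointer of the func value f, suitable for runtime.FuncForPC.
	func entryPC(f interface{}) uintptr {
		return reflect.ValueOf(f).Pointer()
	}

	func main() {
		pc := entryPC(fmt.Println)
		fmt.Println(runtime.FuncForPC(pc).Name()) // prints "fmt.Println"
	}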
func funcdata ¶
func funcdata(f funcInfo, i uint8) unsafe.Pointer
func funcfile ¶
func funcfile(f funcInfo, fileno int32) string
func funcline ¶
func funcline(f funcInfo, targetpc uintptr) (file string, line int32)
func funcline1 ¶
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32)
func funcname ¶
func funcname(f funcInfo) string
func funcnameFromNameoff ¶
func funcnameFromNameoff(f funcInfo, nameoff int32) string
func funcspdelta ¶
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32
func funpack32 ¶
func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool)
func funpack64 ¶
func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool)
func futex ¶
func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
func futexsleep ¶
func futexsleep(addr *uint32, val uint32, ns int64)
Atomically,
if(*addr == val) sleep
Might be woken up spuriously; that's allowed. Don't sleep longer than ns; ns < 0 means forever.
func futexwakeup ¶
func futexwakeup(addr *uint32, cnt uint32)
If any procs are sleeping on addr, wake up at most cnt.
func gcAssistAlloc ¶
func gcAssistAlloc(gp *g)
gcAssistAlloc performs GC work to make gp's assist debt positive. gp must be the calling user goroutine.
This must be called with preemption enabled.
func gcAssistAlloc1 ¶
func gcAssistAlloc1(gp *g, scanWork int64)
gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system stack. This is a separate function to make it easier to see that we're not capturing anything from the user stack, since the user stack may move while we're in this function.
gcAssistAlloc1 indicates whether this assist completed the mark phase by setting gp.param to non-nil. This can't be communicated on the stack since it may move.
func gcBgMarkPrepare ¶
func gcBgMarkPrepare()
gcBgMarkPrepare sets up state for background marking. Mutator assists must not yet be enabled.
func gcBgMarkStartWorkers ¶
func gcBgMarkStartWorkers()
gcBgMarkStartWorkers prepares background mark worker goroutines. These goroutines will not run until the mark phase, but they must be started while the work is not stopped and from a regular G stack. The caller must hold worldsema.
func gcBgMarkWorker ¶
func gcBgMarkWorker(_p_ *p)
func gcDrain ¶
func gcDrain(gcw *gcWork, flags gcDrainFlags)
gcDrain scans roots and objects in work buffers, blackening grey objects until it is unable to get more work. It may return before GC is done; it's the caller's responsibility to balance work from other Ps.
If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt is set.
If flags&gcDrainIdle != 0, gcDrain returns when there is other work to do.
If flags&gcDrainFractional != 0, gcDrain self-preempts when pollFractionalWorkerExit() returns true. This implies gcDrainNoBlock.
If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work credit to gcController.bgScanCredit every gcCreditSlack units of scan work.
gcDrain will always return if there is a pending STW.
func gcDrainN ¶
func gcDrainN(gcw *gcWork, scanWork int64) int64
gcDrainN blackens grey objects until it has performed roughly scanWork units of scan work or the G is preempted. This is best-effort, so it may perform less work if it fails to get a work buffer. Otherwise, it will perform at least scanWork units of work, but may perform more because scanning is always done in whole object increments. It returns the amount of scan work performed.
The caller goroutine must be in a preemptible state (e.g., _Gwaiting) to prevent deadlocks during stack scanning. As a consequence, this must be called on the system stack.
func gcDumpObject ¶
func gcDumpObject(label string, obj, off uintptr)
gcDumpObject dumps the contents of obj for debugging and marks the field at byte offset off in obj.
func gcEffectiveGrowthRatio ¶
func gcEffectiveGrowthRatio() float64
gcEffectiveGrowthRatio returns the current effective heap growth ratio (GOGC/100) based on heap_marked from the previous GC and next_gc for the current GC.
This may differ from gcpercent/100 because of various upper and lower bounds on gcpercent. For example, if the heap is smaller than heapminimum, this can be higher than gcpercent/100.
mheap_.lock must be held or the world must be stopped.
func gcFlushBgCredit ¶
func gcFlushBgCredit(scanWork int64)
gcFlushBgCredit flushes scanWork units of background scan work credit. This first satisfies blocked assists on the work.assistQueue and then flushes any remaining credit to gcController.bgScanCredit.
Write barriers are disallowed because this is used by gcDrain after it has ensured that all work is drained and this must preserve that condition.
func gcMark ¶
func gcMark(start_time int64)
gcMark runs the mark (or, for concurrent GC, mark termination). All gcWork caches must be empty. STW is in effect at this point.
func gcMarkDone ¶
func gcMarkDone()
gcMarkDone transitions the GC from mark to mark termination if all reachable objects have been marked (that is, there are no grey objects and there can be no more in the future). Otherwise, it flushes all local work to the global queues where it can be discovered by other workers.
This should be called when all local mark work has been drained and there are no remaining workers. Specifically, when
work.nwait == work.nproc && !gcMarkWorkAvailable(p)
The calling context must be preemptible.
Flushing local work is important because idle Ps may have local work queued. This is the only way to make that work visible and drive GC to completion.
It is explicitly okay to have write barriers in this function. If it does transition to mark termination, then all reachable objects have been marked, so the write barrier cannot shade any more objects.
func gcMarkRootCheck ¶
func gcMarkRootCheck()
gcMarkRootCheck checks that all roots have been scanned. It is purely for debugging.
func gcMarkRootPrepare ¶
func gcMarkRootPrepare()
gcMarkRootPrepare queues root scanning jobs (stacks, globals, and some miscellany) and initializes scanning-related state.
The world must be stopped.
func gcMarkTermination ¶
func gcMarkTermination(nextTriggerRatio float64)
func gcMarkTinyAllocs ¶
func gcMarkTinyAllocs()
gcMarkTinyAllocs greys all active tiny alloc blocks.
The world must be stopped.
func gcMarkWorkAvailable ¶
func gcMarkWorkAvailable(p *p) bool
gcMarkWorkAvailable reports whether executing a mark worker on p is potentially useful. p may be nil, in which case it only checks the global sources of work.
func gcPaceScavenger ¶
func gcPaceScavenger()
gcPaceScavenger updates the scavenger's pacing, particularly its rate and RSS goal.
The RSS goal is based on the current heap goal with a small overhead to accommodate non-determinism in the allocator.
The pacing is based on scavengePageRate, which applies to both regular and huge pages. See that constant for more information.
mheap_.lock must be held or the world must be stopped.
func gcParkAssist ¶
func gcParkAssist() bool
gcParkAssist puts the current goroutine on the assist queue and parks.
gcParkAssist reports whether the assist is now satisfied. If it returns false, the caller must retry the assist.
func gcResetMarkState ¶
func gcResetMarkState()
gcResetMarkState resets global state prior to marking (concurrent or STW) and resets the stack scan state of all Gs.
This is safe to do without the world stopped because any Gs created during or after this will start out in the reset state.
gcResetMarkState must be called on the system stack because it acquires the heap lock. See mheap for details.
func gcSetTriggerRatio ¶
func gcSetTriggerRatio(triggerRatio float64)
gcSetTriggerRatio sets the trigger ratio and updates everything derived from it: the absolute trigger, the heap goal, mark pacing, and sweep pacing.
This can be called any time. If GC is in the middle of a concurrent phase, it will adjust the pacing of that phase.
This depends on gcpercent, memstats.heap_marked, and memstats.heap_live. These must be up to date.
mheap_.lock must be held or the world must be stopped.
func gcStart ¶
func gcStart(trigger gcTrigger)
gcStart starts the GC. It transitions from _GCoff to _GCmark (if debug.gcstoptheworld == 0) or performs all of GC (if debug.gcstoptheworld != 0).
This may return without performing this transition in some cases, such as when called on a system stack or with locks held.
func gcSweep ¶
func gcSweep(mode gcMode)
gcSweep must be called on the system stack because it acquires the heap lock. See mheap for details.
The world must be stopped.
func gcWaitOnMark ¶
func gcWaitOnMark(n uint32)
gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has already completed this mark phase, it returns immediately.
func gcWakeAllAssists ¶
func gcWakeAllAssists()
gcWakeAllAssists wakes all currently blocked assists. This is used at the end of a GC cycle. gcBlackenEnabled must be false to prevent new assists from going to sleep after this point.
func gcWriteBarrier ¶
func gcWriteBarrier()
Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrierBP ¶
func gcWriteBarrierBP()
func gcWriteBarrierBX ¶
func gcWriteBarrierBX()
func gcWriteBarrierCX ¶
func gcWriteBarrierCX()
Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrierDX ¶
func gcWriteBarrierDX()
func gcWriteBarrierR8 ¶
func gcWriteBarrierR8()
func gcWriteBarrierR9 ¶
func gcWriteBarrierR9()
func gcWriteBarrierSI ¶
func gcWriteBarrierSI()
func gcallers ¶
func gcallers(gp *g, skip int, pcbuf []uintptr) int
func gcd ¶
func gcd(a, b uint32) uint32
func gcenable ¶
func gcenable()
gcenable is called after the bulk of the runtime initialization, just before we're about to start letting user code run. It kicks off the background sweeper goroutine, the background scavenger goroutine, and enables GC.
func gcinit ¶
func gcinit()
func gcmarknewobject ¶
func gcmarknewobject(span *mspan, obj, size, scanSize uintptr)
gcmarknewobject marks a newly allocated object black. obj must not contain any non-nil pointers.
This is nosplit so it can manipulate a gcWork without preemption.
func gcount ¶
func gcount() int32
func gcstopm ¶
func gcstopm()
Stops the current m for stopTheWorld. Returns when the world is restarted.
func gentraceback ¶
func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int
Generic traceback. Handles runtime stack prints (pcbuf == nil), the runtime.Callers function (pcbuf != nil), as well as the garbage collector (callback != nil). A little clunky to merge these, but avoids duplicating the code and all its subtlety.
The skip argument is only valid with pcbuf != nil and counts the number of logical frames to skip rather than physical frames (with inlining, a PC in pcbuf can represent multiple calls). If a PC is partially skipped and max > 1, pcbuf[1] will be runtime.skipPleaseUseCallersFrames+N where N indicates the number of logical frames to skip in pcbuf[0].
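gentraceback is not callable from user code; its public counterpart for the pcbuf != nil case is the runtime.Callers / runtime.CallersFrames pair, which performs the logical-frame (inlining) accounting described above. A minimal sketch:
	import (
		"fmt"
		"runtime"
	)

	// printCallers prints the logical (inlining-aware) call stack of its
	// caller, skipping `skip` additional logical frames.
	func printCallers(skip int) {
		pc := make([]uintptr, 32)
		n := runtime.Callers(skip+2, pc) // +2 skips runtime.Callers and printCallers
		frames := runtime.CallersFrames(pc[:n])
		for {
			frame, more := frames.Next()
			fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
			if !more {
				break
			}
		}
	}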
func getArgInfo ¶
func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector)
getArgInfo returns the argument frame information for a call to f with call frame frame.
This is used for both actual calls with active stack frames and for deferred calls or goroutines that are not yet executing. If this is an actual call, ctxt must be nil (getArgInfo will retrieve what it needs from the active stack frame). If this is a deferred call or unstarted goroutine, ctxt must be the function object that was deferred or go'd.
func getArgInfoFast ¶
func getArgInfoFast(f funcInfo, needArgMap bool) (arglen uintptr, argmap *bitvector, ok bool)
getArgInfoFast returns the argument frame information for a call to f. It is short and inlineable. However, it does not handle all functions. If ok is false, you must call getArgInfo instead. TODO(josharian): once we do mid-stack inlining, call getArgInfo directly from getArgInfoFast and stop returning an ok bool.
func getHugePageSize ¶
func getHugePageSize() uintptr
func getRandomData ¶
func getRandomData(r []byte)
func getStackMap ¶
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord)
getStackMap returns the locals and arguments live pointer maps, and stack object list for frame.
func getargp ¶
func getargp(x int) uintptr
getargp returns the location where the caller writes outgoing function call arguments.
func getcallerpc ¶
func getcallerpc() uintptr
func getcallersp ¶
func getcallersp() uintptr
func getclosureptr ¶
func getclosureptr() uintptr
getclosureptr returns the pointer to the current closure. getclosureptr can only be used in an assignment statement at the entry of a function. Moreover, the go:nosplit directive must be specified at the declaration of the caller function, so that the function prologue does not clobber the closure register. For example:
//go:nosplit
func f(arg1, arg2, arg3 int) {
	dx := getclosureptr()
}
The compiler rewrites calls to this function into instructions that fetch the pointer from a well-known register (DX on x86 architecture, etc.) directly.
func getgcmask ¶
func getgcmask(ep interface{}) (mask []byte)
Returns GC type info for the pointer stored in ep for testing. If ep points to the stack, only static live information will be returned (i.e. not for objects which are only dynamically live stack objects).
func getgcmaskcb ¶
func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool
func getm ¶
func getm() uintptr
A helper function for EnsureDropM.
func getpid ¶
func getpid() int
func getproccount ¶
func getproccount() int32
func getsig ¶
func getsig(i uint32) uintptr
func gettid ¶
func gettid() uint32
func gfpurge ¶
func gfpurge(_p_ *p)
Purge all cached G's from gfree list to the global list.
func gfput ¶
func gfput(_p_ *p, gp *g)
Put on gfree list. If local list is too long, transfer a batch to the global list.
func globrunqput ¶
func globrunqput(gp *g)
Put gp on the global runnable queue. Sched must be locked. May run during STW, so write barriers are not allowed.
func globrunqputbatch ¶
func globrunqputbatch(batch *gQueue, n int32)
Put a batch of runnable goroutines on the global runnable queue. This clears *batch. Sched must be locked.
func globrunqputhead ¶
func globrunqputhead(gp *g)
Put gp at the head of the global runnable queue. Sched must be locked. May run during STW, so write barriers are not allowed.
func goPanicIndex ¶
func goPanicIndex(x int, y int)
failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicIndexU ¶
func goPanicIndexU(x uint, y int)
func goPanicSlice3Acap ¶
func goPanicSlice3Acap(x int, y int)
func goPanicSlice3AcapU ¶
func goPanicSlice3AcapU(x uint, y int)
func goPanicSlice3Alen ¶
func goPanicSlice3Alen(x int, y int)
failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3AlenU ¶
func goPanicSlice3AlenU(x uint, y int)
func goPanicSlice3B ¶
func goPanicSlice3B(x int, y int)
failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3BU ¶
func goPanicSlice3BU(x uint, y int)
func goPanicSlice3C ¶
func goPanicSlice3C(x int, y int)
failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3CU ¶
func goPanicSlice3CU(x uint, y int)
func goPanicSliceAcap ¶
func goPanicSliceAcap(x int, y int)
func goPanicSliceAcapU ¶
func goPanicSliceAcapU(x uint, y int)
func goPanicSliceAlen ¶
func goPanicSliceAlen(x int, y int)
failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSliceAlenU ¶
func goPanicSliceAlenU(x uint, y int)
func goPanicSliceB ¶
func goPanicSliceB(x int, y int)
failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicSliceBU ¶
func goPanicSliceBU(x uint, y int)
func goargs ¶
func goargs()
func gobytes ¶
func gobytes(p *byte, n int) (b []byte)
used by cmd/cgo
func goenvs ¶
func goenvs()
func goenvs_unix ¶
func goenvs_unix()
func goexit ¶
func goexit(neverCallThisFunction)
goexit is the return stub at the top of every goroutine call stack. Each goroutine stack is constructed as if goexit called the goroutine's entry point function, so that when the entry point function returns, it will return to goexit, which will call goexit1 to perform the actual exit.
This function must never be called directly. Call goexit1 instead. gentraceback assumes that goexit terminates the stack. A direct call on the stack will cause gentraceback to stop walking the stack prematurely and if there is leftover state it may panic.
func goexit0 ¶
func goexit0(gp *g)
goexit continuation on g0.
func goexit1 ¶
func goexit1()
Finishes execution of the current goroutine.
func gogetenv ¶
func gogetenv(key string) string
func gogo ¶
func gogo(buf *gobuf)
func gopanic ¶
func gopanic(e interface{})
The implementation of the predeclared function panic.
func gopark ¶
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int)
Puts the current goroutine into a waiting state and calls unlockf. If unlockf returns false, the goroutine is resumed. unlockf must not access this G's stack, as it may be moved between the call to gopark and the call to unlockf. Reason explains why the goroutine has been parked. It is displayed in stack traces and heap dumps. Reasons should be unique and descriptive. Do not re-use reasons, add new ones.
func goparkunlock ¶
func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int)
Puts the current goroutine into a waiting state and unlocks the lock. The goroutine can be made runnable again by calling goready(gp).
func gopreempt_m ¶
func gopreempt_m(gp *g)
func goready ¶
func goready(gp *g, traceskip int)
func gorecover ¶
func gorecover(argp uintptr) interface{}
The implementation of the predeclared function recover. Cannot split the stack because it needs to reliably find the stack segment of its caller.
TODO(rsc): Once we commit to CopyStackAlways, this doesn't need to be nosplit.
func goroutineProfileWithLabels ¶
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool)
labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineReady ¶
func goroutineReady(arg interface{}, seq uintptr)
Ready the goroutine arg.
func goroutineheader ¶
func goroutineheader(gp *g)
func gosave ¶
func gosave(buf *gobuf)
func goschedImpl ¶
func goschedImpl(gp *g)
func gosched_m ¶
func gosched_m(gp *g)
Gosched continuation on g0.
func goschedguarded ¶
func goschedguarded()
goschedguarded yields the processor like gosched, but also checks for forbidden states and opts out of the yield in those cases.
func goschedguarded_m ¶
func goschedguarded_m(gp *g)
goschedguarded_m is the forbidden-states-avoided version of gosched_m.
func gostartcall ¶
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer)
adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate gosave.
func gostartcallfn ¶
func gostartcallfn(gobuf *gobuf, fv *funcval)
adjust Gobuf as if it executed a call to fn and then did an immediate gosave.
func gostring ¶
func gostring(p *byte) string
This is exported via linkname to assembly in syscall (for Plan9).
func gostringn ¶
func gostringn(p *byte, l int) string
func gostringnocopy ¶
func gostringnocopy(str *byte) string
func gostringw ¶
func gostringw(strw *uint16) string
func gotraceback ¶
func gotraceback() (level int32, all, crash bool)
gotraceback returns the current traceback settings.
If level is 0, suppress all tracebacks. If level is 1, show tracebacks, but exclude runtime frames. If level is 2, show tracebacks including runtime frames. If all is set, print all goroutine stacks. Otherwise, print just the current goroutine. If crash is set, crash (core dump, etc) after tracebacking.
func goyield ¶
func goyield()
goyield is like Gosched, but it:
- emits a GoPreempt trace event instead of a GoSched trace event
- puts the current G on the runq of the current P instead of the globrunq
func goyield_m ¶
func goyield_m(gp *g)
func greyobject ¶
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr)
obj is the start of an object with mark mbits. If it isn't already marked, mark it and enqueue into gcw. base and off are for debugging only and could be removed.
See also wbBufFlush1, which partially duplicates this logic.
func growWork ¶
func growWork(t *maptype, h *hmap, bucket uintptr)
func growWork_fast32 ¶
func growWork_fast32(t *maptype, h *hmap, bucket uintptr)
func growWork_fast64 ¶
func growWork_fast64(t *maptype, h *hmap, bucket uintptr)
func growWork_faststr ¶
func growWork_faststr(t *maptype, h *hmap, bucket uintptr)
func gwrite ¶
func gwrite(b []byte)
write to goroutine-local buffer if diverting output, or else standard error.
func handoffp ¶
func handoffp(_p_ *p)
Hands off P from syscall or locked M. Always runs without a P, so write barriers are not allowed.
func hasPrefix ¶
func hasPrefix(s, prefix string) bool
func hashGrow ¶
func hashGrow(t *maptype, h *hmap)
func haveexperiment ¶
func haveexperiment(name string) bool
func heapBitsSetType ¶
func heapBitsSetType(x, size, dataSize uintptr, typ *_type)
heapBitsSetType records that the new allocation [x, x+size) holds in [x, x+dataSize) one or more values of type typ. (The number of values is given by dataSize / typ.size.) If dataSize < size, the fragment [x+dataSize, x+size) is recorded as non-pointer data. It is known that the type has pointers somewhere; malloc does not call heapBitsSetType when there are no pointers, because all free objects are marked as noscan during heapBitsSweepSpan.
There can only be one allocation from a given span active at a time, and the bitmap for a span always falls on byte boundaries, so there are no write-write races for access to the heap bitmap. Hence, heapBitsSetType can access the bitmap without atomics.
There can be read-write races between heapBitsSetType and things that read the heap bitmap like scanobject. However, since heapBitsSetType is only used for objects that have not yet been made reachable, readers will ignore bits being modified by this function. This does mean this function cannot transiently modify bits that belong to neighboring objects. Also, on weakly-ordered machines, callers must execute a store/store (publication) barrier between calling this function and making the object reachable.
func heapBitsSetTypeGCProg ¶
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte)
heapBitsSetTypeGCProg implements heapBitsSetType using a GC program. progSize is the size of the memory described by the program. elemSize is the size of the element that the GC program describes (a prefix of). dataSize is the total size of the intended data, a multiple of elemSize. allocSize is the total size of the allocated memory.
GC programs are only used for large allocations. heapBitsSetType requires that allocSize is a multiple of 4 words, so that the relevant bitmap bytes are not shared with surrounding objects.
func heapRetained ¶
func heapRetained() uint64
heapRetained returns an estimate of the current heap RSS.
func hexdumpWords ¶
func hexdumpWords(p, end uintptr, mark func(uintptr) byte)
hexdumpWords prints a word-oriented hex dump of [p, end).
If mark != nil, it will be called with each printed word's address and should return a character mark to appear just before that word's value. It can return 0 to indicate no mark.
func ifaceHash ¶
func ifaceHash(i interface { F() }, seed uintptr) uintptr
func ifaceeq ¶
func ifaceeq(tab *itab, x, y unsafe.Pointer) bool
func inHeapOrStack ¶
func inHeapOrStack(b uintptr) bool
inHeapOrStack is a variant of inheap that returns true for pointers into any allocated heap span.
func inPersistentAlloc ¶
func inPersistentAlloc(p uintptr) bool
inPersistentAlloc reports whether p points to memory allocated by persistentalloc. This must be nosplit because it is called by the cgo checker code, which is called by the write barrier code.
func inRange ¶
func inRange(r0, r1, v0, v1 uintptr) bool
inRange reports whether v0 or v1 are in the range [r0, r1].
func inVDSOPage ¶
func inVDSOPage(pc uintptr) bool
inVDSOPage reports whether pc is on the VDSO page.
func incidlelocked ¶
func incidlelocked(v int32)
func index ¶
func index(s, t string) int
func inf2one ¶
func inf2one(f float64) float64
inf2one returns a signed 1 if f is an infinity and a signed 0 otherwise. The sign of the result is the sign of f.
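The documented behavior can be expressed with the standard math package; a sketch (inf2oneSketch is a hypothetical name, not the runtime's implementation):
	import "math"

	// inf2oneSketch: a signed 1 for ±Inf, a signed 0 otherwise, preserving
	// the sign of f. Illustrative only.
	func inf2oneSketch(f float64) float64 {
		if math.IsInf(f, 0) {
			return math.Copysign(1, f)
		}
		return math.Copysign(0, f)
	}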
func inheap ¶
func inheap(b uintptr) bool
inheap reports whether b is a pointer into a (potentially dead) heap object. It returns false for pointers into mSpanManual spans. Non-preemptible because it is used by write barriers.
func init ¶
func init()
start forcegc helper goroutine
func initAlgAES ¶
func initAlgAES()
func initCheckmarks ¶
func initCheckmarks()
func initsig ¶
func initsig(preinit bool)
Initialize signals. Called by libpreinit so runtime may not be initialized.
func injectglist ¶
func injectglist(glist *gList)
injectglist adds each runnable G on the list to some run queue, and clears glist. If there is no current P, they are added to the global queue, and up to npidle M's are started to run them. Otherwise, for each idle P, this adds a G to the global queue and starts an M. Any remaining G's are added to the current P's local run queue. This may temporarily acquire the scheduler lock. Can run concurrently with GC.
func int32Hash ¶
func int32Hash(i uint32, seed uintptr) uintptr
func int64Hash ¶
func int64Hash(i uint64, seed uintptr) uintptr
func interequal ¶
func interequal(p, q unsafe.Pointer) bool
func interhash ¶
func interhash(p unsafe.Pointer, h uintptr) uintptr
func intstring ¶
func intstring(buf *[4]byte, v int64) (s string)
func isAbortPC ¶
func isAbortPC(pc uintptr) bool
isAbortPC reports whether pc is the program counter at which runtime.abort raises a signal.
It is nosplit because it's part of the isgoexception implementation.
func isAsyncSafePoint ¶
func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr)
isAsyncSafePoint reports whether gp at instruction PC is an asynchronous safe point. This indicates that:
1. It's safe to suspend gp and conservatively scan its stack and registers. There are no potentially hidden pointer values and it's not in the middle of an atomic sequence like a write barrier.
2. gp has enough stack space to inject the asyncPreempt call.
3. It's generally safe to interact with the runtime, even if we're in a signal handler stopped here. For example, there are no runtime locks held, so acquiring a runtime lock won't self-deadlock.
In some cases the PC is safe for asynchronous preemption but it also needs to adjust the resumption PC. The new PC is returned in the second result.
func isDirectIface ¶
func isDirectIface(t *_type) bool
isDirectIface reports whether t is stored directly in an interface value.
func isEmpty ¶
func isEmpty(x uint8) bool
isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isExportedRuntime ¶
func isExportedRuntime(name string) bool
isExportedRuntime reports whether name is an exported runtime function. It is only for runtime functions, so ASCII A-Z is fine.
func isFinite ¶
func isFinite(f float64) bool
isFinite reports whether f is neither NaN nor an infinity.
func isInf ¶
func isInf(f float64) bool
isInf reports whether f is an infinity.
func isNaN ¶
func isNaN(f float64) (is bool)
isNaN reports whether f is an IEEE 754 “not-a-number” value.
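isFinite, isInf, and isNaN follow the classic IEEE 754 identities. A hedged sketch using package math for the constant (the runtime itself avoids that dependency; these are illustrative names):
	import "math"

	func isNaNSketch(f float64) bool { return f != f } // NaN is the only value unequal to itself

	func isInfSketch(f float64) bool {
		return f > math.MaxFloat64 || f < -math.MaxFloat64 // NaN compares false either way
	}

	func isFiniteSketch(f float64) bool { return !isNaNSketch(f - f) } // Inf-Inf and NaN-NaN are NaN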
func isPowerOfTwo ¶
func isPowerOfTwo(x uintptr) bool
func isShrinkStackSafe ¶
func isShrinkStackSafe(gp *g) bool
isShrinkStackSafe returns whether it's safe to attempt to shrink gp's stack. Shrinking the stack is only safe when we have precise pointer maps for all frames on the stack.
func isSweepDone ¶
func isSweepDone() bool
isSweepDone reports whether all spans are swept or currently being swept.
Note that this condition may transition from false to true at any time as the sweeper runs. It may transition from true to false if a GC runs; to prevent that the caller must be non-preemptible or must somehow block GC progress.
func isSystemGoroutine ¶
func isSystemGoroutine(gp *g, fixed bool) bool
isSystemGoroutine reports whether the goroutine g must be omitted from stack dumps and by the deadlock detector. This is any goroutine that starts at a runtime.* entry point, except for runtime.main, runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq.
If fixed is true, any goroutine that can vary between user and system (that is, the finalizer goroutine) is considered a user goroutine.
func itabAdd ¶
func itabAdd(m *itab)
itabAdd adds the given itab to the itab hash table. itabLock must be held.
func itabHashFunc ¶
func itabHashFunc(inter *interfacetype, typ *_type) uintptr
func itab_callback ¶
func itab_callback(tab *itab)
func itabsinit ¶
func itabsinit()
func iterate_finq ¶
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype))
func iterate_itabs ¶
func iterate_itabs(fn func(*itab))
func iterate_memprof ¶
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr))
func itoa ¶
func itoa(buf []byte, val uint64) []byte
itoa converts val to a decimal representation. The result is written somewhere within buf and the location of the result is returned. buf must be at least 20 bytes.
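A sketch matching this contract, writing digits backwards from the end of buf and returning the sub-slice that holds them (itoaSketch is a hypothetical name and not necessarily the runtime's exact code):
	// itoaSketch writes the decimal form of val into the tail of buf and
	// returns the sub-slice containing it. buf must be at least 20 bytes,
	// enough for the largest uint64.
	func itoaSketch(buf []byte, val uint64) []byte {
		i := len(buf) - 1
		for val >= 10 {
			buf[i] = byte(val%10 + '0')
			i--
			val /= 10
		}
		buf[i] = byte(val + '0')
		return buf[i:]
	}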
func itoaDiv ¶
func itoaDiv(buf []byte, val uint64, dec int) []byte
itoaDiv formats val/(10**dec) into buf.
func jmpdefer ¶
func jmpdefer(fv *funcval, argp uintptr)
func key32 ¶
func key32(p *uintptr) *uint32
We use the uintptr mutex.key and note.key as a uint32.
func less ¶
func less(a, b uint32) bool
less checks whether a < b, treating a and b as running counts that may wrap around the 32-bit range, under the assumption that their "unwrapped" difference is always less than 2^31.
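Wrap-safe comparisons of this kind are usually written as a signed subtraction; a sketch of the standard idiom (lessSketch is a hypothetical name, not necessarily the runtime's exact code):
	// lessSketch reports a < b for wrapping 32-bit counters whose true
	// difference is below 2^31: the sign bit of a-b decides.
	func lessSketch(a, b uint32) bool {
		return int32(a-b) < 0
	}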
func lfnodeValidate ¶
func lfnodeValidate(node *lfnode)
lfnodeValidate panics if node is not a valid address for use with lfstack.push. This only needs to be called when node is allocated.
func lfstackPack ¶
func lfstackPack(node *lfnode, cnt uintptr) uint64
func libpreinit ¶
func libpreinit()
Called to do synchronous initialization of Go code built with -buildmode=c-archive or -buildmode=c-shared. None of the Go runtime is initialized.
func lock ¶
func lock(l *mutex)
func lock2 ¶
func lock2(l *mutex)
func lockInit ¶
func lockInit(l *mutex, rank lockRank)
func lockOSThread ¶
func lockOSThread()
func lockWithRank ¶
func lockWithRank(l *mutex, rank lockRank)
func lockWithRankMayAcquire ¶
func lockWithRankMayAcquire(l *mutex, rank lockRank)
func lockedOSThread ¶
func lockedOSThread() bool
func lowerASCII ¶
func lowerASCII(c byte) byte
func mProf_Flush ¶
func mProf_Flush()
mProf_Flush flushes the events from the current heap profiling cycle into the active profile. After this it is safe to start a new heap profiling cycle with mProf_NextCycle.
This is called by GC after mark termination starts the world. In contrast with mProf_NextCycle, this is somewhat expensive, but safe to do concurrently.
func mProf_FlushLocked ¶
func mProf_FlushLocked()
func mProf_Free ¶
func mProf_Free(b *bucket, size uintptr)
Called when freeing a profiled block.
func mProf_Malloc ¶
func mProf_Malloc(p unsafe.Pointer, size uintptr)
Called by malloc to record a profiled block.
func mProf_NextCycle ¶
func mProf_NextCycle()
mProf_NextCycle publishes the next heap profile cycle and creates a fresh heap profile cycle. This operation is fast and can be done during STW. The caller must call mProf_Flush before calling mProf_NextCycle again.
This is called by mark termination during STW so allocations and frees after the world is started again count towards a new heap profiling cycle.
func mProf_PostSweep ¶
func mProf_PostSweep()
mProf_PostSweep records that all sweep frees for this GC cycle have completed. This has the effect of publishing the heap profile snapshot as of the last mark termination without advancing the heap profile cycle.
func mReserveID ¶
func mReserveID() int64
mReserveID returns the next ID to use for a new m. This new m is immediately considered 'running' by checkdead.
sched.lock must be held.
func mSysStatDec ¶
func mSysStatDec(sysStat *uint64, n uintptr)
Atomically decreases a given *system* memory stat. Same comments as mSysStatInc apply.
func mSysStatInc ¶
func mSysStatInc(sysStat *uint64, n uintptr)
Atomically increases a given *system* memory stat. We are counting on this stat never overflowing a uintptr, so this function must only be used for system memory stats.
The current implementation for little endian architectures is based on xadduintptr(), which is less than ideal: xadd64() should really be used. Using xadduintptr() is a stop-gap solution until arm supports xadd64() that doesn't use locks. (Locks are a problem as they require a valid G, which restricts their useability.)
A side-effect of using xadduintptr() is that we need to check for overflow errors.
func madvise ¶
func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
return value is only set on linux to be used in osinit()
func main ¶
func main()
The main goroutine.
func main_main ¶
func main_main()
func makeslice ¶
func makeslice(et *_type, len, cap int) unsafe.Pointer
func makeslice64 ¶
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer
func makeslicecopy ¶
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
makeslicecopy allocates a slice of "tolen" elements of type "et", then copies "fromlen" elements of type "et" into that new allocation from "from".
func mallocgc ¶
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer
Allocate an object of size bytes. Small objects are allocated from the per-P cache's free lists. Large objects (> 32 kB) are allocated straight from the heap.
func mallocinit ¶
func mallocinit()
func mapaccess1 ¶
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
mapaccess1 returns a pointer to h[key]. Never returns nil, instead it will return a reference to the zero object for the elem type if the key is not in the map. NOTE: The returned pointer may keep the whole map live, so don't hold onto it for very long.
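This is what gives map lookups their zero-value semantics at the language level; a small user-level illustration (the lookups below are implemented by the mapaccess1*/mapaccess2* family):
	package main

	import "fmt"

	func main() {
		m := map[string]int{"a": 1}
		v := m["missing"]      // one-result form: zero value, never a nil dereference
		v2, ok := m["missing"] // comma-ok form: also reports presence
		fmt.Println(v, v2, ok) // 0 0 false
	}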
func mapaccess1_fast32 ¶
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer
func mapaccess1_fast64 ¶
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer
func mapaccess1_faststr ¶
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer
func mapaccess1_fat ¶
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer
func mapaccess2 ¶
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
func mapaccess2_fast32 ¶
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool)
func mapaccess2_fast64 ¶
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool)
func mapaccess2_faststr ¶
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool)
func mapaccess2_fat ¶
func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool)
func mapaccessK ¶
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer)
Returns both key and elem. Used by the map iterator.
func mapassign ¶
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign_fast32 ¶
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer
func mapassign_fast32ptr ¶
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
func mapassign_fast64 ¶
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer
func mapassign_fast64ptr ¶
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
func mapassign_faststr ¶
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer
func mapclear ¶
func mapclear(t *maptype, h *hmap)
mapclear deletes all keys from a map.
func mapdelete ¶
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer)
func mapdelete_fast32 ¶
func mapdelete_fast32(t *maptype, h *hmap, key uint32)
func mapdelete_fast64 ¶
func mapdelete_fast64(t *maptype, h *hmap, key uint64)
func mapdelete_faststr ¶
func mapdelete_faststr(t *maptype, h *hmap, ky string)
func mapiterinit ¶
func mapiterinit(t *maptype, h *hmap, it *hiter)
mapiterinit initializes the hiter struct used for ranging over maps. The hiter struct pointed to by 'it' is allocated on the stack by the compiler's order pass or on the heap by reflect_mapiterinit. Both need to have a zeroed hiter since the struct contains pointers.
func mapiternext ¶
func mapiternext(it *hiter)
func markroot ¶
func markroot(gcw *gcWork, i uint32)
markroot scans the i'th root.
Preemption must be disabled (because this uses a gcWork).
nowritebarrier is only advisory here.
func markrootBlock ¶
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int)
markrootBlock scans the shard'th shard of the block of memory [b0, b0+n0), with the given pointer mask.
func markrootFreeGStacks ¶
func markrootFreeGStacks()
markrootFreeGStacks frees stacks of dead Gs.
This does not free stacks of dead Gs cached on Ps, but having a few cached stacks around isn't a problem.
func markrootSpans ¶
func markrootSpans(gcw *gcWork, shard int)
markrootSpans marks roots for one shard of markArenas.
func mcall ¶
func mcall(fn func(*g))
mcall switches from the g to the g0 stack and invokes fn(g), where g is the goroutine that made the call. mcall saves g's current PC/SP in g->sched so that it can be restored later. It is up to fn to arrange for that later execution, typically by recording g in a data structure, causing something to call ready(g) later. mcall returns to the original goroutine g later, when g has been rescheduled. fn must not return at all; typically it ends by calling schedule, to let the m run other goroutines.
mcall can only be called from g stacks (not g0, not gsignal).
This must NOT be go:noescape: if fn is a stack-allocated closure, fn puts g on a run queue, and g executes before fn returns, the closure will be invalidated while it is still executing.
func mcommoninit ¶
func mcommoninit(mp *m, id int64)
Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcount ¶
func mcount() int32
func mdump ¶
func mdump()
func memclrHasPointers ¶
func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
memclrHasPointers clears n bytes of typed memory starting at ptr. The caller must ensure that the type of the object at ptr has pointers, usually by checking typ.ptrdata. However, ptr does not have to point to the start of the allocation.
func memclrNoHeapPointers ¶
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
memclrNoHeapPointers clears n bytes starting at ptr.
Usually you should use typedmemclr. memclrNoHeapPointers should be used only when the caller knows that *ptr contains no heap pointers because either:
*ptr is initialized memory and its type is pointer-free, or
*ptr is uninitialized memory (e.g., memory that's being reused for a new allocation) and hence contains only "junk".
The (CPU-specific) implementations of this function are in memclr_*.s.
func memequal ¶
func memequal(a, b unsafe.Pointer, size uintptr) bool
in internal/bytealg/equal_*.s
func memequal0 ¶
func memequal0(p, q unsafe.Pointer) bool
func memequal128 ¶
func memequal128(p, q unsafe.Pointer) bool
func memequal16 ¶
func memequal16(p, q unsafe.Pointer) bool
func memequal32 ¶
func memequal32(p, q unsafe.Pointer) bool
func memequal64 ¶
func memequal64(p, q unsafe.Pointer) bool
func memequal8 ¶
func memequal8(p, q unsafe.Pointer) bool
func memequal_varlen ¶
func memequal_varlen(a, b unsafe.Pointer) bool
func memhash ¶
func memhash(p unsafe.Pointer, h, s uintptr) uintptr
in asm_*.s
func memhash0 ¶
func memhash0(p unsafe.Pointer, h uintptr) uintptr
func memhash128 ¶
func memhash128(p unsafe.Pointer, h uintptr) uintptr
func memhash16 ¶
func memhash16(p unsafe.Pointer, h uintptr) uintptr
func memhash32 ¶
func memhash32(p unsafe.Pointer, h uintptr) uintptr
func memhash32Fallback ¶
func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr
func memhash64 ¶
func memhash64(p unsafe.Pointer, h uintptr) uintptr
func memhash64Fallback ¶
func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr
func memhash8 ¶
func memhash8(p unsafe.Pointer, h uintptr) uintptr
func memhashFallback ¶
func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr
func memhash_varlen ¶
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
func memmove ¶
func memmove(to, from unsafe.Pointer, n uintptr)
memmove copies n bytes from "from" to "to".
memmove ensures that any pointer in "from" is written to "to" with an indivisible write, so that racy reads cannot observe a half-written pointer. This is necessary to prevent the garbage collector from observing invalid pointers, and differs from memmove in unmanaged languages. However, memmove is only required to do this if "from" and "to" may contain pointers, which can only be the case if "from", "to", and "n" are all word-aligned.
Implementations are in memmove_*.s.
func mexit ¶
func mexit(osStack bool)
mexit tears down and exits the current thread.
Don't call this directly to exit the thread, since it must run at the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to unwind the stack to the point that exits the thread.
It is entered with m.p != nil, so write barriers are allowed. It will release the P before exiting.
func mincore ¶
func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
func minit ¶
func minit()
Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.
func minitSignalMask ¶
func minitSignalMask()
minitSignalMask is called when initializing a new m to set the thread's signal mask. When this is called all signals have been blocked for the thread. This starts with m.sigmask, which was set either from initSigmask for a newly created thread or by calling sigsave if this is a non-Go thread calling a Go function. It removes all essential signals from the mask, thus causing those signals to not be blocked. Then it sets the thread's signal mask. After this is called the thread can receive signals.
func minitSignalStack ¶
func minitSignalStack()
minitSignalStack is called when initializing a new m to set the alternate signal stack. If the alternate signal stack is not set for the thread (the normal case) then set the alternate signal stack to the gsignal stack. If the alternate signal stack is set for the thread (the case when a non-Go thread sets the alternate signal stack and then calls a Go function) then set the gsignal stack to the alternate signal stack. We also set the alternate signal stack to the gsignal stack if cgo is not used (regardless of whether it is already set). Record which choice was made in newSigstack, so that it can be undone in unminit.
func minitSignals ¶
func minitSignals()
minitSignals is called when initializing a new m to set the thread's alternate signal stack and signal mask.
func mlock ¶
func mlock(addr, len uintptr) int
func mlockGsignal ¶
func mlockGsignal(gsignal *g)
func mmap ¶
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int)
mmap is used to route the mmap system call through C code when using cgo, to support sanitizer interceptors. Don't allow stack splits, since this function (used by sysAlloc) is called in a lot of low-level parts of the runtime and callers often assume it won't acquire any locks.
func modTimer ¶
func modTimer(t *timer, when, period int64, f func(interface{}, uintptr), arg interface{}, seq uintptr)
modTimer modifies an existing timer.
func modtimer ¶
func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg interface{}, seq uintptr) bool
modtimer modifies an existing timer. This is called by the netpoll code or time.Ticker.Reset. Reports whether the timer was modified before it was run.
func moduledataverify ¶
func moduledataverify()
func moduledataverify1 ¶
func moduledataverify1(datap *moduledata)
func modulesinit ¶
func modulesinit()
modulesinit creates the active modules slice out of all loaded modules.
When a module is first loaded by the dynamic linker, an .init_array function (written by cmd/link) is invoked to call addmoduledata, appending the module to the linked list that starts with firstmoduledata.
There are two times this can happen in the lifecycle of a Go program. First, if compiled with -linkshared, a number of modules built with -buildmode=shared can be loaded at program initialization. Second, a Go program can load a module while running that was built with -buildmode=plugin.
After loading, this function is called which initializes the moduledata so it is usable by the GC and creates a new activeModules list.
Only one goroutine may call modulesinit at a time.
func morestack ¶
func morestack()
func morestack_noctxt ¶
func morestack_noctxt()
func morestackc ¶
func morestackc()
This is exported as ABI0 via linkname so obj can call it.
func moveTimers ¶
func moveTimers(pp *p, timers []*timer)
moveTimers moves a slice of timers to pp. The slice has been taken from a different P. This is currently called when the world is stopped, but the caller is expected to have locked the timers for pp.
func mpreinit ¶
func mpreinit(mp *m)
Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mput ¶
func mput(mp *m)
Put mp on midle list. Sched must be locked. May run during STW, so write barriers are not allowed.
func msanfree ¶
func msanfree(addr unsafe.Pointer, sz uintptr)
func msanmalloc ¶
func msanmalloc(addr unsafe.Pointer, sz uintptr)
func msanread ¶
func msanread(addr unsafe.Pointer, sz uintptr)
func msanwrite ¶
func msanwrite(addr unsafe.Pointer, sz uintptr)
func msigrestore ¶
func msigrestore(sigmask sigset)
msigrestore sets the current thread's signal mask to sigmask. This is used to restore the non-Go signal mask when a non-Go thread calls a Go function. This is nosplit and nowritebarrierrec because it is called by dropm after g has been cleared.
func mspinning ¶
func mspinning()
func mstart ¶
func mstart()
mstart is the entry-point for new Ms.
This must not split the stack because we may not even have stack bounds set up yet.
May run during STW (because it doesn't have a P yet), so write barriers are not allowed.
func mstart1 ¶
func mstart1()
func mstartm0 ¶
func mstartm0()
mstartm0 implements part of mstart1 that only runs on the m0.
Write barriers are allowed here because we know the GC can't be running yet, so they'll be no-ops.
func mullu ¶
func mullu(u, v uint64) (lo, hi uint64)
64x64 -> 128 multiply. Adapted from Hacker's Delight.
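The standard library's math/bits.Mul64 performs the same full-width multiply; a sketch matching mullu's (lo, hi) result order (mulluSketch is a hypothetical name):
	import "math/bits"

	// mulluSketch returns the 128-bit product of u and v as (lo, hi),
	// mirroring the documented signature. Note bits.Mul64 returns (hi, lo).
	func mulluSketch(u, v uint64) (lo, hi uint64) {
		hi, lo = bits.Mul64(u, v)
		return lo, hi
	}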
func munmap ¶
func munmap(addr unsafe.Pointer, n uintptr)
func mutexevent ¶
func mutexevent(cycles int64, skip int)
func nanotime ¶
func nanotime() int64
func nanotime1 ¶
func nanotime1() int64
func needm ¶
func needm(x byte)
needm is called when a cgo callback happens on a thread without an m (a thread not created by Go). In this case, needm is expected to find an m to use and return with m, g initialized correctly. Since m and g are not set now (likely nil, but see below) needm is limited in what routines it can call. In particular it can only call nosplit functions (textflag 7) and cannot do any scheduling that requires an m.
In order to avoid needing heavy lifting here, we adopt the following strategy: there is a stack of available m's that can be stolen. Using compare-and-swap to pop from the stack has ABA races, so we simulate a lock by doing an exchange (via Casuintptr) to steal the stack head and replace the top pointer with MLOCKED (1). This serves as a simple spin lock that we can use even without an m. The thread that locks the stack in this way unlocks the stack by storing a valid stack head pointer.
In order to make sure that there is always an m structure available to be stolen, we maintain the invariant that there is always one more than needed. At the beginning of the program (if cgo is in use) the list is seeded with a single m. If needm finds that it has taken the last m off the list, its job is - once it has installed its own m so that it can do things like allocate memory - to create a spare m and put it on the list.
Each of these extra m's also has a g0 and a curg that are pressed into service as the scheduling stack and current goroutine for the duration of the cgo callback.
When the callback is done with the m, it calls dropm to put the m back on the list.
func netpollBreak ¶
func netpollBreak()
netpollBreak interrupts an epollwait.
func netpollDeadline ¶
func netpollDeadline(arg interface{}, seq uintptr)
func netpollGenericInit ¶
func netpollGenericInit()
func netpollIsPollDescriptor ¶
func netpollIsPollDescriptor(fd uintptr) bool
func netpollReadDeadline ¶
func netpollReadDeadline(arg interface{}, seq uintptr)
func netpollWriteDeadline ¶
func netpollWriteDeadline(arg interface{}, seq uintptr)
func netpollarm ¶
func netpollarm(pd *pollDesc, mode int)
func netpollblock ¶
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool
Returns true if IO is ready, or false if it timed out or was closed.
waitio - wait only for completed IO, ignore errors
func netpollblockcommit ¶
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool
func netpollcheckerr ¶
func netpollcheckerr(pd *pollDesc, mode int32) int
func netpollclose ¶
func netpollclose(fd uintptr) int32
func netpolldeadlineimpl ¶
func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool)
func netpollgoready ¶
func netpollgoready(gp *g, traceskip int)
func netpollinit ¶
func netpollinit()
func netpollinited ¶
func netpollinited() bool
func netpollopen ¶
func netpollopen(fd uintptr, pd *pollDesc) int32
func netpollready ¶
func netpollready(toRun *gList, pd *pollDesc, mode int32)
netpollready is called by the platform-specific netpoll function. It declares that the fd associated with pd is ready for I/O. The toRun argument is used to build a list of goroutines to return from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate whether the fd is ready for reading or writing or both.
This may run while the world is stopped, so write barriers are not allowed.
func newarray ¶
func newarray(typ *_type, n int) unsafe.Pointer
newarray allocates an array of n elements of type typ.
func newextram ¶
func newextram()
newextram allocates m's and puts them on the extra list. It is called with a working local m, so that it can do things like call schedlock and allocate.
func newm ¶
func newm(fn func(), _p_ *p, id int64)
Create a new m. It will start off with a call to fn, or else the scheduler. fn needs to be static and not a heap allocated closure. May run with m.p==nil, so write barriers are not allowed.
id is optional pre-allocated m ID. Omit by passing -1.
func newm1 ¶
func newm1(mp *m)
func newobject ¶
func newobject(typ *_type) unsafe.Pointer
Implementation of the new builtin. The compiler (both frontend and SSA backend) knows the signature of this function.
func newosproc ¶
func newosproc(mp *m)
May run with m.p==nil, so write barriers are not allowed.
func newosproc0 ¶
func newosproc0(stacksize uintptr, fn unsafe.Pointer)
Version of newosproc that doesn't require a valid G.
func newproc ¶
func newproc(siz int32, fn *funcval)
Create a new g running fn with siz bytes of arguments. Put it on the queue of g's waiting to run. The compiler turns a go statement into a call to this.
The stack layout of this call is unusual: it assumes that the arguments to pass to fn are on the stack sequentially immediately after &fn. Hence, they are logically part of newproc's argument frame, even though they don't appear in its signature (and can't because their types differ between call sites).
This must be nosplit because this stack layout means there are untyped arguments in newproc's argument frame. Stack copies won't be able to adjust them and stack splits won't be able to copy them.
func newstack ¶
func newstack()
Called from runtime·morestack when more stack is needed. Allocate larger stack and relocate to new stack. Stack growth is multiplicative, for constant amortized cost.
g->atomicstatus will be Grunning or Gscanrunning upon entry. If the scheduler is trying to stop this g, then it will set preemptStop.
This must be nowritebarrierrec because it can be called as part of stack growth from other nowritebarrierrec functions, but the compiler doesn't check this.
func nextMarkBitArenaEpoch ¶
func nextMarkBitArenaEpoch()
nextMarkBitArenaEpoch establishes a new epoch for the arenas holding the mark bits. The arenas are named relative to the current GC cycle which is demarcated by the call to finishsweep_m.
All current spans have been swept. During that sweep each span allocated room for its gcmarkBits in gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current where the GC will mark objects and after each span is swept these bits will be used to allocate objects. gcBitsArenas.current becomes gcBitsArenas.previous where the span's gcAllocBits live until all the spans have been swept during this GC cycle. The span's sweep extinguishes all the references to gcBitsArenas.previous by pointing gcAllocBits into the gcBitsArenas.current. The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextSample ¶
func nextSample() uintptr
nextSample returns the next sampling point for heap profiling. The goal is to sample allocations on average every MemProfileRate bytes, but with a completely random distribution over the allocation timeline; this corresponds to a Poisson process with parameter MemProfileRate. In Poisson processes, the distance between two samples follows the exponential distribution (exp(MemProfileRate)), so the best return value is a random number taken from an exponential distribution whose mean is MemProfileRate.
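As a rough illustration of that sampling scheme, the sketch below draws the byte distance to the next sample from an exponential distribution with mean MemProfileRate using inverse-CDF sampling. It is only a model of the idea: the real nextSample avoids math.Log in favor of a cheap fixed-point approximation, and memProfileRate below is an assumed constant equal to the default rate.

package main

import (
    "fmt"
    "math"
    "math/rand"
)

// nextSampleDistance returns the number of bytes until the next heap sample,
// drawn from an exponential distribution with the given mean.
func nextSampleDistance(mean float64) uintptr {
    // Inverse-CDF sampling: if U is uniform on (0,1], then -mean*ln(U) is
    // exponentially distributed with the given mean.
    u := rand.Float64()
    if u == 0 {
        u = math.SmallestNonzeroFloat64
    }
    return uintptr(-mean * math.Log(u))
}

func main() {
    const memProfileRate = 512 * 1024 // assumed default: one sample per 512 KiB on average
    var sum float64
    const n = 100000
    for i := 0; i < n; i++ {
        sum += float64(nextSampleDistance(memProfileRate))
    }
    fmt.Printf("observed mean distance: %.0f bytes\n", sum/n)
}

math/rand's ExpFloat64 would give the same distribution directly as rand.ExpFloat64() * mean; the explicit log is spelled out here only to match the description above.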
func nextSampleNoFP ¶
func nextSampleNoFP() uintptr
nextSampleNoFP is similar to nextSample, but uses older, simpler code to avoid floating point.
func nilfunc ¶
func nilfunc()
func nilinterequal ¶
func nilinterequal(p, q unsafe.Pointer) bool
func nilinterhash ¶
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
func noSignalStack ¶
func noSignalStack(sig uint32)
This is called when we receive a signal when there is no signal stack. This can only happen if non-Go code calls sigaltstack to disable the signal stack.
func nobarrierWakeTime ¶
func nobarrierWakeTime(pp *p) int64
nobarrierWakeTime looks at P's timers and returns the time when we should wake up the netpoller. It returns 0 if there are no timers. This function is invoked when dropping a P, and must run without any write barriers. Therefore, if there are any timers that need to be moved earlier, it conservatively returns the current time. The netpoller M will wake up and adjust timers before sleeping again.
func noescape ¶
func noescape(p unsafe.Pointer) unsafe.Pointer
noescape hides a pointer from escape analysis. noescape is the identity function but escape analysis doesn't think the output depends on the input. noescape is inlined and currently compiles down to zero instructions. USE CAREFULLY!
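For reference, the widely cited form of this trick looks like the sketch below: XOR-ing the pointer's integer value with 0 is a no-op at run time but severs the data-flow edge that escape analysis follows. This is a sketch of the idiom, not a recommendation; hiding a pointer that really does escape leads to memory corruption, and go vet will flag the uintptr round-trip.

package main

import (
    "fmt"
    "unsafe"
)

// noescape returns p unchanged while hiding it from escape analysis.
// The x ^ 0 keeps the compiler from treating the result as an alias of p.
func noescape(p unsafe.Pointer) unsafe.Pointer {
    x := uintptr(p)
    return unsafe.Pointer(x ^ 0)
}

func main() {
    v := 42
    p := noescape(unsafe.Pointer(&v))
    fmt.Println(*(*int)(p)) // prints 42; v is not forced onto the heap by p
}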
func nonblockingPipe ¶
func nonblockingPipe() (r, w int32, errno int32)
func noteclear ¶
func noteclear(n *note)
One-time notifications.
func notesleep ¶
func notesleep(n *note)
func notetsleep ¶
func notetsleep(n *note, ns int64) bool
func notetsleep_internal ¶
func notetsleep_internal(n *note, ns int64) bool
May run with m.p==nil if called from notetsleep, so write barriers are not allowed.
func notetsleepg ¶
func notetsleepg(n *note, ns int64) bool
Same as runtime·notetsleep, but called on user g (not g0). Calls only nosplit functions between entersyscallblock/exitsyscall.
func notewakeup ¶
func notewakeup(n *note)
func notifyListAdd ¶
func notifyListAdd(l *notifyList) uint32
notifyListAdd adds the caller to a notify list such that it can receive notifications. The caller must eventually call notifyListWait to wait for such a notification, passing the returned ticket number.
func notifyListCheck ¶
func notifyListCheck(sz uintptr)
func notifyListNotifyAll ¶
func notifyListNotifyAll(l *notifyList)
notifyListNotifyAll notifies all entries in the list.
func notifyListNotifyOne ¶
func notifyListNotifyOne(l *notifyList)
notifyListNotifyOne notifies one entry in the list.
func notifyListWait ¶
func notifyListWait(l *notifyList, t uint32)
notifyListWait waits for a notification. If one has been sent since notifyListAdd was called, it returns immediately. Otherwise, it blocks.
func offAddrToLevelIndex ¶
func offAddrToLevelIndex(level int, addr offAddr) int
offAddrToLevelIndex converts an address in the offset address space to the index into summary[level] containing addr.
func oldMarkrootSpans ¶
func oldMarkrootSpans(gcw *gcWork, shard int)
oldMarkrootSpans marks roots for one shard of work.spans.
For go115NewMarkrootSpans = false.
func oneNewExtraM ¶
func oneNewExtraM()
oneNewExtraM allocates an m and puts it on the extra list.
func open ¶
func open(name *byte, mode, perm int32) int32
func osArchInit ¶
func osArchInit()
func osPreemptExtEnter ¶
func osPreemptExtEnter(mp *m)
func osPreemptExtExit ¶
func osPreemptExtExit(mp *m)
func osRelax ¶
func osRelax(relax bool)
osRelax is called by the scheduler when transitioning to and from all Ps being idle.
func osStackAlloc ¶
func osStackAlloc(s *mspan)
osStackAlloc performs OS-specific initialization before s is used as stack memory.
func osStackFree ¶
func osStackFree(s *mspan)
osStackFree undoes the effect of osStackAlloc before s is returned to the heap.
func os_beforeExit ¶
func os_beforeExit()
os_beforeExit is called from os.Exit(0).
func os_runtime_args ¶
func os_runtime_args() []string
func os_sigpipe ¶
func os_sigpipe()
func osinit ¶
func osinit()
func osyield ¶
func osyield()
func overLoadFactor ¶
func overLoadFactor(count int, B uint8) bool
overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
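A sketch of the check, using the constants the map implementation is commonly described with (8 entries per bucket, load factor 6.5 written as 13/2); treat the exact constants as assumptions rather than a contract.

package main

import "fmt"

const (
    bucketCnt     = 8  // assumed entries per bucket
    loadFactorNum = 13 // load factor 6.5 kept in integer arithmetic as 13/2
    loadFactorDen = 2
)

// bucketShift returns 1<<b, the number of buckets for a given B.
func bucketShift(b uint8) uintptr { return uintptr(1) << b }

// overLoadFactor reports whether count items in 1<<B buckets exceed the load factor.
func overLoadFactor(count int, B uint8) bool {
    return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

func main() {
    // With B=3 there are 8 buckets, so the threshold is 13*(8/2) = 52 entries.
    fmt.Println(overLoadFactor(52, 3)) // false
    fmt.Println(overLoadFactor(53, 3)) // true
}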
func pageIndexOf ¶
func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8)
pageIndexOf returns the arena, page index, and page mask for pointer p. The caller must ensure p is in the heap.
func panicCheck1 ¶
func panicCheck1(pc uintptr, msg string)
Check to make sure we can really generate a panic. If the panic was generated from the runtime, or from inside malloc, then convert to a throw of msg. pc should be the program counter of the compiler-generated code that triggered this panic.
func panicCheck2 ¶
func panicCheck2(err string)
Same as above, but calling from the runtime is allowed.
Using this function is necessary for any panic that may be generated by runtime.sigpanic, since those are always called by the runtime.
func panicIndex ¶
func panicIndex(x int, y int)
Implemented in assembly, as they take arguments in registers. Declared here to mark them as ABIInternal.
func panicIndexU ¶
func panicIndexU(x uint, y int)
func panicSlice3Acap ¶
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU ¶
func panicSlice3AcapU(x uint, y int)
func panicSlice3Alen ¶
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU ¶
func panicSlice3AlenU(x uint, y int)
func panicSlice3B ¶
func panicSlice3B(x int, y int)
func panicSlice3BU ¶
func panicSlice3BU(x uint, y int)
func panicSlice3C ¶
func panicSlice3C(x int, y int)
func panicSlice3CU ¶
func panicSlice3CU(x uint, y int)
func panicSliceAcap ¶
func panicSliceAcap(x int, y int)
func panicSliceAcapU ¶
func panicSliceAcapU(x uint, y int)
func panicSliceAlen ¶
func panicSliceAlen(x int, y int)
func panicSliceAlenU ¶
func panicSliceAlenU(x uint, y int)
func panicSliceB ¶
func panicSliceB(x int, y int)
func panicSliceBU ¶
func panicSliceBU(x uint, y int)
func panicdivide ¶
func panicdivide()
func panicdottypeE ¶
func panicdottypeE(have, want, iface *_type)
panicdottypeE is called when doing an e.(T) conversion and the conversion fails. have = the dynamic type we have. want = the static type we're trying to convert to. iface = the static type we're converting from.
func panicdottypeI ¶
func panicdottypeI(have *itab, want, iface *_type)
panicdottypeI is called when doing an i.(T) conversion and the conversion fails. Same args as panicdottypeE, but "have" is the dynamic itab we have.
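From the user's point of view, these functions are where a failed type assertion lands. A small example of the panic they produce, and of the comma-ok form that avoids it (the exact panic message wording is the compiler's, quoted in the comment from memory):

package main

import "fmt"

func main() {
    var e interface{} = 42

    // A failed e.(T) assertion on an empty interface ends up in panicdottypeE
    // (or panicnildottype when e is nil).
    func() {
        defer func() { fmt.Println("recovered:", recover()) }()
        _ = e.(string) // panics: interface conversion: interface {} is int, not string
    }()

    // The comma-ok form never panics; it just reports failure.
    s, ok := e.(string)
    fmt.Println(s, ok) // "" false
}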
func panicfloat ¶
func panicfloat()
func panicmakeslicecap ¶
func panicmakeslicecap()
func panicmakeslicelen ¶
func panicmakeslicelen()
func panicmem ¶
func panicmem()
func panicnildottype ¶
func panicnildottype(want *_type)
panicnildottype is called when doing an i.(T) conversion and the interface i is nil. want = the static type we're trying to convert to.
func panicoverflow ¶
func panicoverflow()
func panicshift ¶
func panicshift()
func panicwrap ¶
func panicwrap()
panicwrap generates a panic for a call to a wrapped value method with a nil pointer receiver.
It is called from the generated wrapper code.
func park_m ¶
func park_m(gp *g)
park continuation on g0.
func parkunlock_c ¶
func parkunlock_c(gp *g, lock unsafe.Pointer) bool
func parseRelease ¶
func parseRelease(rel string) (major, minor, patch int, ok bool)
parseRelease parses a dot-separated version number. It follows the semver syntax, but allows the minor and patch versions to be elided.
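A sketch of that parsing contract, written with strconv and strings for clarity (the runtime's version hand-rolls the digit parsing); the rejection of suffixed or malformed versions below is a property of this sketch, not necessarily of the runtime function.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseRelease parses a dot-separated version, allowing minor and patch to be
// elided: "4", "4.15", and "4.15.2" all parse. Anything else reports ok=false.
func parseRelease(rel string) (major, minor, patch int, ok bool) {
    parts := strings.SplitN(rel, ".", 4)
    if len(parts) == 0 || len(parts) > 3 {
        return 0, 0, 0, false
    }
    var nums [3]int
    for i, p := range parts {
        n, err := strconv.Atoi(p)
        if err != nil || n < 0 {
            return 0, 0, 0, false
        }
        nums[i] = n
    }
    return nums[0], nums[1], nums[2], true
}

func main() {
    for _, rel := range []string{"4", "4.15", "4.15.2", "4.15.2.1", "junk"} {
        major, minor, patch, ok := parseRelease(rel)
        fmt.Println(rel, "=>", major, minor, patch, ok)
    }
}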
func parsedebugvars ¶
func parsedebugvars()
func pcdatastart ¶
func pcdatastart(f funcInfo, table int32) int32
func pcdatavalue ¶
func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32
func pcdatavalue1 ¶
func pcdatavalue1(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32
func pcdatavalue2 ¶
func pcdatavalue2(f funcInfo, table int32, targetpc uintptr) (int32, uintptr)
Like pcdatavalue, but also return the start PC of this PCData value. It doesn't take a cache.
func pcvalue ¶
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr)
Returns the PCData value, and the PC where this value starts. TODO: the start PC is returned only when cache is nil.
func pcvalueCacheKey ¶
func pcvalueCacheKey(targetpc uintptr) uintptr
pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc. It must be very cheap to calculate. For now, align to sys.PtrSize and reduce mod the number of entries. In practice, this appears to be fairly randomly and evenly distributed.
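The indexing idea is compact enough to show directly. In the sketch below, ptrSize and cacheRows are illustrative constants standing in for sys.PtrSize and the cache's actual entry count.

package main

import "fmt"

const (
    ptrSize   = 8  // stands in for sys.PtrSize on a 64-bit target
    cacheRows = 16 // assumed number of rows in the cache
)

// cacheKey aligns targetpc down to a pointer-sized boundary and reduces it
// modulo the number of rows, giving a cheap, roughly uniform index.
func cacheKey(targetpc uintptr) uintptr {
    return (targetpc / ptrSize) % cacheRows
}

func main() {
    for _, pc := range []uintptr{0x4561a0, 0x4561a8, 0x4561b0} {
        fmt.Printf("pc %#x -> row %d\n", pc, cacheKey(pc))
    }
}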
func persistentalloc ¶
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
Wrapper around sysAlloc that can allocate small chunks. There is no associated free operation. Intended for things like function/type/debug-related persistent data. If align is 0, uses default align (currently 8). The returned memory will be zeroed.
Consider marking persistentalloc'd types go:notinheap.
func pidleput ¶
func pidleput(_p_ *p)
Put p on the _Pidle list. Sched must be locked. May run during STW, so write barriers are not allowed.
func pipe ¶
func pipe() (r, w int32, errno int32)
func pipe2 ¶
func pipe2(flags int32) (r, w int32, errno int32)
func plugin_lastmoduleinit ¶
func plugin_lastmoduleinit() (path string, syms map[string]interface{}, errstr string)
func pluginftabverify ¶
func pluginftabverify(md *moduledata)
func pollFractionalWorkerExit ¶
func pollFractionalWorkerExit() bool
pollFractionalWorkerExit reports whether a fractional mark worker should self-preempt. It assumes it is called from the fractional worker.
func pollWork ¶
func pollWork() bool
pollWork reports whether there is non-background work this P could be doing. This is a fairly lightweight check to be used for background work loops, like idle GC. It checks a subset of the conditions checked by the actual scheduler.
func poll_runtime_Semacquire ¶
func poll_runtime_Semacquire(addr *uint32)
func poll_runtime_Semrelease ¶
func poll_runtime_Semrelease(addr *uint32)
func poll_runtime_isPollServerDescriptor ¶
func poll_runtime_isPollServerDescriptor(fd uintptr) bool
poll_runtime_isPollServerDescriptor reports whether fd is a descriptor being used by netpoll.
func poll_runtime_pollClose ¶
func poll_runtime_pollClose(pd *pollDesc)
func poll_runtime_pollOpen ¶
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int)
func poll_runtime_pollReset ¶
func poll_runtime_pollReset(pd *pollDesc, mode int) int
poll_runtime_pollReset, which is internal/poll.runtime_pollReset, prepares a descriptor for polling in mode, which is 'r' or 'w'. This returns an error code; the codes are defined above.
func poll_runtime_pollServerInit ¶
func poll_runtime_pollServerInit()
func poll_runtime_pollSetDeadline ¶
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int)
func poll_runtime_pollUnblock ¶
func poll_runtime_pollUnblock(pd *pollDesc)
func poll_runtime_pollWait ¶
func poll_runtime_pollWait(pd *pollDesc, mode int) int
poll_runtime_pollWait, which is internal/poll.runtime_pollWait, waits for a descriptor to be ready for reading or writing, according to mode, which is 'r' or 'w'. This returns an error code; the codes are defined above.
func poll_runtime_pollWaitCanceled ¶
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int)
func preemptM ¶
func preemptM(mp *m)
preemptM sends a preemption request to mp. This request may be handled asynchronously and may be coalesced with other requests to the M. When the request is received, if the running G or P are marked for preemption and the goroutine is at an asynchronous safe-point, it will preempt the goroutine. It always atomically increments mp.preemptGen after handling a preemption request.
func preemptPark ¶
func preemptPark(gp *g)
preemptPark parks gp and puts it in _Gpreempted.
func preemptall ¶
func preemptall() bool
Tell all goroutines that they have been preempted and they should stop. This function is purely best-effort. It can fail to inform a goroutine if a processor just started running it. No locks need to be held. Returns true if preemption request was issued to at least one goroutine.
func preemptone ¶
func preemptone(_p_ *p) bool
Tell the goroutine running on processor P to stop. This function is purely best-effort. It can incorrectly fail to inform the goroutine. It can inform the wrong goroutine. Even if it informs the correct goroutine, that goroutine might ignore the request if it is simultaneously executing newstack. No lock needs to be held. Returns true if a preemption request was issued. The actual preemption will happen at some point in the future and will be indicated by the gp->status no longer being Grunning.
func prepGoExitFrame ¶
func prepGoExitFrame(sp uintptr)
func prepareFreeWorkbufs ¶
func prepareFreeWorkbufs()
prepareFreeWorkbufs moves busy workbuf spans to free list so they can be freed to the heap. This must only be called when all workbufs are on the empty list.
func preprintpanics ¶
func preprintpanics(p *_panic)
Call all Error and String methods before freezing the world. Used when crashing while panicking.
func printAncestorTraceback ¶
func printAncestorTraceback(ancestor ancestorInfo)
printAncestorTraceback prints the traceback of the given ancestor. TODO: Unify this with gentraceback and CallersFrames.
func printAncestorTracebackFuncInfo ¶
func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr)
printAncestorTracebackFuncInfo prints the given function info at a given pc within an ancestor traceback. The precision of this info is reduced because we only have access to the pcs captured when the caller goroutine was created.
func printCgoTraceback ¶
func printCgoTraceback(callers *cgoCallers)
printCgoTraceback prints a traceback of callers.
func printDebugLog ¶
func printDebugLog()
printDebugLog prints the debug log.
func printDebugLogPC ¶
func printDebugLogPC(pc uintptr, returnPC bool)
printDebugLogPC prints a single symbolized PC. If returnPC is true, pc is a return PC that must first be converted to a call PC.
func printOneCgoTraceback ¶
func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int
printOneCgoTraceback prints the traceback of a single cgo caller. This can print more than one line because of inlining. Returns the number of frames printed.
func printScavTrace ¶
func printScavTrace(gen uint32, released uintptr, forced bool)
printScavTrace prints a scavenge trace line to standard error.
released should be the amount of memory released since the last time this was called, and forced indicates whether the scavenge was forced by the application.
func printany ¶
func printany(i interface{})
printany prints an argument passed to panic. If panic is called with a value that has a String or Error method, it has already been converted into a string by preprintpanics.
func printanycustomtype ¶
func printanycustomtype(i interface{})
func printbool ¶
func printbool(v bool)
func printcomplex ¶
func printcomplex(c complex128)
func printcreatedby ¶
func printcreatedby(gp *g)
func printcreatedby1 ¶
func printcreatedby1(f funcInfo, pc uintptr)
func printeface ¶
func printeface(e eface)
func printfloat ¶
func printfloat(v float64)
func printhex ¶
func printhex(v uint64)
func printiface ¶
func printiface(i iface)
func printint ¶
func printint(v int64)
func printlock ¶
func printlock()
func printnl ¶
func printnl()
func printpanics ¶
func printpanics(p *_panic)
Print all currently active panics. Used when crashing. Should only be called after preprintpanics.
func printpointer ¶
func printpointer(p unsafe.Pointer)
func printslice ¶
func printslice(s []byte)
func printsp ¶
func printsp()
func printstring ¶
func printstring(s string)
func printuint ¶
func printuint(v uint64)
func printunlock ¶
func printunlock()
func procPin ¶
func procPin() int
func procUnpin ¶
func procUnpin()
func procyield ¶
func procyield(cycles uint32)
func profilealloc ¶
func profilealloc(mp *m, x unsafe.Pointer, size uintptr)
func publicationBarrier ¶
func publicationBarrier()
publicationBarrier performs a store/store barrier (a "publication" or "export" barrier). Some form of synchronization is required between initializing an object and making that object accessible to another processor. Without synchronization, the initialization writes and the "publication" write may be reordered, allowing the other processor to follow the pointer and observe an uninitialized object. In general, higher-level synchronization should be used, such as locking or an atomic pointer write. publicationBarrier is for when those aren't an option, such as in the implementation of the memory manager.
There's no corresponding barrier for the read side because the read side naturally has a data dependency order. All architectures that Go supports or seems likely to ever support automatically enforce data dependency ordering.
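Outside the memory manager, the higher-level synchronization the comment recommends usually takes the form of an atomic publication of a fully built object. A minimal sketch using sync/atomic's Value (ordinary Go, not runtime code):

package main

import (
    "fmt"
    "sync/atomic"
)

type config struct {
    name  string
    limit int
}

var current atomic.Value // holds *config

// publish fully initializes a config and only then makes it visible.
// The atomic store plays the publication-barrier role: a reader that
// observes the pointer also observes the initialization writes.
func publish(name string, limit int) {
    current.Store(&config{name: name, limit: limit})
}

func load() *config {
    c, _ := current.Load().(*config)
    return c
}

func main() {
    publish("default", 10)
    if c := load(); c != nil {
        fmt.Println(c.name, c.limit)
    }
}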
func purgecachedstats ¶
func purgecachedstats(c *mcache)
func putCachedDlogger ¶
func putCachedDlogger(l *dlogger) bool
func putempty ¶
func putempty(b *workbuf)
putempty puts a workbuf onto the work.empty list. Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
func putfull ¶
func putfull(b *workbuf)
putfull puts the workbuf on the work.full list for the GC. putfull accepts partially full buffers so the GC can avoid competing with the mutators for ownership of partially full buffers.
func queuefinalizer ¶
func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype)
func raceReadObjectPC ¶
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr)
func raceWriteObjectPC ¶
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr)
func raceacquire ¶
func raceacquire(addr unsafe.Pointer)
func raceacquirectx ¶
func raceacquirectx(racectx uintptr, addr unsafe.Pointer)
func raceacquireg ¶
func raceacquireg(gp *g, addr unsafe.Pointer)
func racectxend ¶
func racectxend(racectx uintptr)
func racefingo ¶
func racefingo()
func racefini ¶
func racefini()
func racefree ¶
func racefree(p unsafe.Pointer, sz uintptr)
func racegoend ¶
func racegoend()
func racegostart ¶
func racegostart(pc uintptr) uintptr
func raceinit ¶
func raceinit() (uintptr, uintptr)
func racemalloc ¶
func racemalloc(p unsafe.Pointer, sz uintptr)
func racemapshadow ¶
func racemapshadow(addr unsafe.Pointer, size uintptr)
func raceproccreate ¶
func raceproccreate() uintptr
func raceprocdestroy ¶
func raceprocdestroy(ctx uintptr)
func racereadpc ¶
func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr)
func racereadrangepc ¶
func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr)
func racerelease ¶
func racerelease(addr unsafe.Pointer)
func racereleaseg ¶
func racereleaseg(gp *g, addr unsafe.Pointer)
func racereleasemerge ¶
func racereleasemerge(addr unsafe.Pointer)
func racereleasemergeg ¶
func racereleasemergeg(gp *g, addr unsafe.Pointer)
func racesync ¶
func racesync(c *hchan, sg *sudog)
func racewritepc ¶
func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr)
func racewriterangepc ¶
func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr)
func raise ¶
func raise(sig uint32)
func raisebadsignal ¶
func raisebadsignal(sig uint32, c *sigctxt)
raisebadsignal is called when a signal is received on a non-Go thread, and the Go program does not want to handle it (that is, the program has not called os/signal.Notify for the signal).
func raiseproc ¶
func raiseproc(sig uint32)
func rawbyteslice ¶
func rawbyteslice(size int) (b []byte)
rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
func rawruneslice ¶
func rawruneslice(size int) (b []rune)
rawruneslice allocates a new rune slice. The rune slice is not zeroed.
func rawstring ¶
func rawstring(size int) (s string, b []byte)
rawstring allocates storage for a new string. The returned string and byte slice both refer to the same storage. The storage is not zeroed. Callers should use b to set the string contents and then drop b.
func rawstringtmp ¶
func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte)
func read ¶
func read(fd int32, p unsafe.Pointer, n int32) int32
read calls the read system call. It returns a non-negative number of bytes read or a negative errno value.
func readGCStats ¶
func readGCStats(pauses *[]uint64)
func readGCStats_m ¶
func readGCStats_m(pauses *[]uint64)
readGCStats_m must be called on the system stack because it acquires the heap lock. See mheap for details.
func readUnaligned32 ¶
func readUnaligned32(p unsafe.Pointer) uint32
Note: These routines perform the read with a native endianness.
func readUnaligned64 ¶
func readUnaligned64(p unsafe.Pointer) uint64
func readgogc ¶
func readgogc() int32
func readgstatus ¶
func readgstatus(gp *g) uint32
All reads and writes of g's status go through readgstatus, casgstatus, castogscanstatus, and casfrom_Gscanstatus.
func readmemstats_m ¶
func readmemstats_m(stats *MemStats)
func readvarint ¶
func readvarint(p []byte) (read uint32, val uint32)
readvarint reads a varint from p.
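The encoding is the usual little-endian base-128 varint. Below is a sketch of the decoder, checked against encoding/binary's encoder; the real readvarint is equivalent in spirit but lives in the runtime and decodes into a uint32.

package main

import (
    "encoding/binary"
    "fmt"
)

// readvarint decodes a little-endian base-128 varint from p, returning the
// number of bytes consumed and the decoded value.
func readvarint(p []byte) (read uint32, val uint32) {
    var v, shift, n uint32
    for {
        b := p[n]
        n++
        v |= uint32(b&0x7F) << (shift & 31)
        if b&0x80 == 0 {
            break
        }
        shift += 7
    }
    return n, v
}

func main() {
    buf := make([]byte, binary.MaxVarintLen32)
    n := binary.PutUvarint(buf, 300) // 300 encodes as 0xAC 0x02
    read, val := readvarint(buf[:n])
    fmt.Println(read, val) // 2 300
}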
func readvarintUnsafe ¶
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer)
readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the uint32 and a pointer to the byte following the varint.
There is a similar function runtime.readvarint, which takes a slice of bytes, rather than an unsafe pointer. These functions are duplicated, because one of the two use cases for the functions would get slower if the functions were combined.
func ready ¶
func ready(gp *g, traceskip int, next bool)
Mark gp ready to run.
func readyForScavenger ¶
func readyForScavenger()
readyForScavenger signals sysmon to wake the scavenger because there may be new work to do.
There may be a significant delay between when this function runs and when the scavenger is kicked awake, but it may be safely invoked in contexts where wakeScavenger is unsafe to call directly.
func readyWithTime ¶
func readyWithTime(s *sudog, traceskip int)
func record ¶
func record(r *MemProfileRecord, b *bucket)
Write b's data to r.
func recordForPanic ¶
func recordForPanic(b []byte)
recordForPanic maintains a circular buffer of messages written by the runtime leading up to a process crash, allowing the messages to be extracted from a core dump.
The text written during a process crash (following "panic" or "fatal error") is not saved, since the goroutine stacks will generally be readable from the runtime data structures in the core file.
func recordspan ¶
func recordspan(vh unsafe.Pointer, p unsafe.Pointer)
recordspan adds a newly allocated span to h.allspans.
This only happens the first time a span is allocated from mheap.spanalloc (it is not called when a span is reused).
Write barriers are disallowed here because it can be called from gcWork when allocating new workbufs. However, because it's an indirect call from the fixalloc initializer, the compiler can't see this.
func recovery ¶
func recovery(gp *g)
Unwind the stack after a deferred function calls recover after a panic. Then arrange to continue running as though the caller of the deferred function returned normally.
func recv ¶
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int)
recv processes a receive operation on a full channel c. There are 2 parts: 1) The value sent by the sender sg is put into the channel and the sender is woken up to go on its merry way. 2) The value received by the receiver (the current G) is written to ep.
For synchronous channels, both values are the same. For asynchronous channels, the receiver gets its data from the channel buffer and the sender's data is put in the channel buffer. Channel c must be full and locked. recv unlocks c with unlockf. sg must already be dequeued from c. A non-nil ep must point to the heap or the caller's stack.
func recvDirect ¶
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer)
func reentersyscall ¶
func reentersyscall(pc, sp uintptr)
The goroutine g is about to enter a system call. Record that it's not using the cpu anymore. This is called only from the go syscall library and cgocall, not from the low-level system calls used by the runtime.
Entersyscall cannot split the stack: the gosave must make g->sched refer to the caller's stack segment, because entersyscall is going to return immediately after.
Nothing entersyscall calls can split the stack either. We cannot safely move the stack during an active call to syscall, because we do not know which of the uintptr arguments are really pointers (back into the stack). In practice, this means that we make the fast path run through entersyscall doing no-split things, and the slow path has to use systemstack to run bigger things on the system stack.
reentersyscall is the entry point used by cgo callbacks, where explicitly saved SP and PC are restored. This is needed when exitsyscall will be called from a function further up in the call stack than the parent, as g->syscallsp must always point to a valid stack frame. entersyscall below is the normal entry point for syscalls, which obtains the SP and PC from the caller.
Syscall tracing: At the start of a syscall we emit traceGoSysCall to capture the stack trace. If the syscall does not block, that is it, we do not emit any other events. If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; when syscall returns we emit traceGoSysExit and when the goroutine starts running (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), whoever emits traceGoSysBlock increments p.syscalltick afterwards; and we wait for the increment before emitting traceGoSysExit. Note that the increment is done even if tracing is not enabled, because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
func reflectOffsLock ¶
func reflectOffsLock()
func reflectOffsUnlock ¶
func reflectOffsUnlock()
func reflect_addReflectOff ¶
func reflect_addReflectOff(ptr unsafe.Pointer) int32
reflect_addReflectOff adds a pointer to the reflection offset lookup map.
func reflect_chancap ¶
func reflect_chancap(c *hchan) int
func reflect_chanclose ¶
func reflect_chanclose(c *hchan)
func reflect_chanlen ¶
func reflect_chanlen(c *hchan) int
func reflect_chanrecv ¶
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool)
func reflect_chansend ¶
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool)
func reflect_gcbits ¶
func reflect_gcbits(x interface{}) []byte
gcbits returns the GC type info for x, for testing. The result is the bitmap entries (0 or 1), one entry per byte.
func reflect_ifaceE2I ¶
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface)
func reflect_mapaccess ¶
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
func reflect_mapassign ¶
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer)
func reflect_mapdelete ¶
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer)
func reflect_mapiterelem ¶
func reflect_mapiterelem(it *hiter) unsafe.Pointer
func reflect_mapiterkey ¶
func reflect_mapiterkey(it *hiter) unsafe.Pointer
func reflect_mapiternext ¶
func reflect_mapiternext(it *hiter)
func reflect_maplen ¶
func reflect_maplen(h *hmap) int
func reflect_memclrNoHeapPointers ¶
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
func reflect_memmove ¶
func reflect_memmove(to, from unsafe.Pointer, n uintptr)
func reflect_resolveNameOff ¶
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
reflect_resolveNameOff resolves a name offset from a base pointer.
func reflect_resolveTextOff ¶
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
reflect_resolveTextOff resolves a function pointer offset from a base type.
func reflect_resolveTypeOff ¶
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
reflect_resolveTypeOff resolves an *rtype offset from a base type.
func reflect_rselect ¶
func reflect_rselect(cases []runtimeSelect) (int, bool)
func reflect_typedmemclr ¶
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer)
func reflect_typedmemclrpartial ¶
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr)
func reflect_typedmemmove ¶
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer)
func reflect_typedmemmovepartial ¶
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr)
typedmemmovepartial is like typedmemmove but assumes that dst and src point off bytes into the value and only copies size bytes. off must be a multiple of sys.PtrSize.
func reflect_typedslicecopy ¶
func reflect_typedslicecopy(elemType *_type, dst, src slice) int
func reflect_typehash ¶
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr
func reflect_typelinks ¶
func reflect_typelinks() ([]unsafe.Pointer, [][]int32)
func reflect_unsafe_New ¶
func reflect_unsafe_New(typ *_type) unsafe.Pointer
func reflect_unsafe_NewArray ¶
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer
func reflectcall ¶
func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
reflectcall calls fn with a copy of the n argument bytes pointed at by arg. After fn returns, reflectcall copies n-retoffset result bytes back into arg+retoffset before returning. If copying result bytes back, the caller should pass the argument frame type as argtype, so that call can execute appropriate write barriers during the copy. Package reflect passes a frame type. In package runtime, there is only one call that copies results back, in cgocallbackg1, and it does NOT pass a frame type, meaning there are no write barriers invoked. See that call site for justification.
Package reflect accesses this symbol through a linkname.
func reflectcallSave ¶
func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32)
reflectcallSave calls reflectcall after saving the caller's pc and sp in the panic record. This allows the runtime to return to the Goexit defer processing loop, in the unusual case where the Goexit may be bypassed by a successful recover.
func reflectcallmove ¶
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr)
reflectcallmove is invoked by reflectcall to copy the return values out of the stack and into the heap, invoking the necessary write barriers. dst, src, and size describe the return value area to copy. typ describes the entire frame (not just the return values). typ may be nil, which indicates write barriers are not needed.
It must be nosplit and must only call nosplit functions because the stack map of reflectcall is wrong.
func reflectlite_chanlen ¶
func reflectlite_chanlen(c *hchan) int
func reflectlite_ifaceE2I ¶
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface)
func reflectlite_maplen ¶
func reflectlite_maplen(h *hmap) int
func reflectlite_resolveNameOff ¶
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
reflectlite_resolveNameOff resolves a name offset from a base pointer.
func reflectlite_resolveTypeOff ¶
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
func reflectlite_typedmemmove ¶
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer)
func reflectlite_unsafe_New ¶
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer
func releaseLockRank ¶
func releaseLockRank(rank lockRank)
func releaseSudog ¶
func releaseSudog(s *sudog)
func releasem ¶
func releasem(mp *m)
func removefinalizer ¶
func removefinalizer(p unsafe.Pointer)
Removes the finalizer (if any) from the object p.
func resetForSleep ¶
func resetForSleep(gp *g, ut unsafe.Pointer) bool
resetForSleep is called after the goroutine is parked for timeSleep. We can't call resettimer in timeSleep itself because if this is a short sleep and there are many goroutines then the P can wind up running the timer function, goroutineReady, before the goroutine has been parked.
func resetTimer ¶
func resetTimer(t *timer, when int64) bool
resetTimer resets an inactive timer, adding it to the heap. Reports whether the timer was modified before it was run.
func resetspinning ¶
func resetspinning()
func resettimer ¶
func resettimer(t *timer, when int64) bool
resettimer resets the time when a timer should fire. If used for an inactive timer, the timer will become active. This should be called instead of addtimer if the timer value has been, or may have been, used previously. Reports whether the timer was modified before it was run.
func restoreGsignalStack ¶
func restoreGsignalStack(st *gsignalStack)
restoreGsignalStack restores the gsignal stack to the value it had before entering the signal handler.
func resumeG ¶
func resumeG(state suspendGState)
resumeG undoes the effects of suspendG, allowing the suspended goroutine to continue from its current safe-point.
func retake ¶
func retake(now int64) uint32
func retpolineAX ¶
func retpolineAX()
Retpolines, used by -spectre=ret flag in cmd/asm, cmd/compile.
func retpolineBP ¶
func retpolineBP()
func retpolineBX ¶
func retpolineBX()
func retpolineCX ¶
func retpolineCX()
func retpolineDI ¶
func retpolineDI()
func retpolineDX ¶
func retpolineDX()
func retpolineR10 ¶
func retpolineR10()
func retpolineR11 ¶
func retpolineR11()
func retpolineR12 ¶
func retpolineR12()
func retpolineR13 ¶
func retpolineR13()
func retpolineR14 ¶
func retpolineR14()
func retpolineR15 ¶
func retpolineR15()
func retpolineR8 ¶
func retpolineR8()
func retpolineR9 ¶
func retpolineR9()
func retpolineSI ¶
func retpolineSI()
func return0 ¶
func return0()
return0 is a stub used to return 0 from deferproc. It is called at the very end of deferproc to signal the calling Go function that it should not jump to deferreturn. Implemented in asm_*.s.
func rotl_31 ¶
func rotl_31(x uint64) uint64
Note: in order to get the compiler to issue rotl instructions, we need to constant fold the shift amount by hand. TODO: convince the compiler to issue rotl instructions after inlining.
func round2 ¶
func round2(x int32) int32
round x up to a power of 2.
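One common way to do this is the bit-smearing trick sketched below; it assumes x > 0 and that the result fits in an int32, and is not necessarily the exact loop the runtime uses.

package main

import "fmt"

// round2 rounds x up to the next power of 2 by smearing the highest set bit
// into every lower position and then adding one.
func round2(x int32) int32 {
    x--
    x |= x >> 1
    x |= x >> 2
    x |= x >> 4
    x |= x >> 8
    x |= x >> 16
    return x + 1
}

func main() {
    for _, x := range []int32{1, 3, 8, 1000} {
        fmt.Println(x, "->", round2(x)) // 1, 4, 8, 1024
    }
}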
func roundupsize ¶
func roundupsize(size uintptr) uintptr
Returns size of the memory block that mallocgc will allocate if you ask for the size.
func rt0_go ¶
func rt0_go()
func rt_sigaction ¶
func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
rt_sigaction is implemented in assembly.
func rtsigprocmask ¶
func rtsigprocmask(how int32, new, old *sigset, size int32)
func runGCProg ¶
func runGCProg(prog, trailer, dst *byte, size int) uintptr
runGCProg executes the GC program prog, and then trailer if non-nil, writing to dst with entries of the given size. If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst. If size == 2, dst is the 2-bit heap bitmap, and writes move backward starting at dst (because the heap bitmap does). In this case, the caller guarantees that only whole bytes in dst need to be written.
runGCProg returns the number of 1- or 2-bit entries written to memory.
func runOneTimer ¶
func runOneTimer(pp *p, t *timer, now int64)
runOneTimer runs a single timer. The caller must have locked the timers for pp. This will temporarily unlock the timers while running the timer function.
func runOpenDeferFrame ¶
func runOpenDeferFrame(gp *g, d *_defer) bool
runOpenDeferFrame runs the active open-coded defers in the frame specified by d. It normally processes all active defers in the frame, but stops immediately if a defer does a successful recover. It returns true if there are no remaining defers to run in the frame.
func runSafePointFn ¶
func runSafePointFn()
runSafePointFn runs the safe point function, if any, for this P. This should be called like
if getg().m.p.runSafePointFn != 0 { runSafePointFn() }
runSafePointFn must be checked on any transition in to _Pidle or _Psyscall to avoid a race where forEachP sees that the P is running just before the P goes into _Pidle/_Psyscall and neither forEachP nor the P run the safe-point function.
func runfinq ¶
func runfinq()
This is the goroutine that runs all of the finalizers.
func runqempty ¶
func runqempty(_p_ *p) bool
runqempty reports whether _p_ has no Gs on its local run queue. It never returns true spuriously.
func runqget ¶
func runqget(_p_ *p) (gp *g, inheritTime bool)
Get g from local runnable queue. If inheritTime is true, gp should inherit the remaining time in the current time slice. Otherwise, it should start a new time slice. Executed only by the owner P.
func runqgrab ¶
func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32
Grabs a batch of goroutines from _p_'s runnable queue into batch. Batch is a ring buffer starting at batchHead. Returns number of grabbed goroutines. Can be executed by any P.
func runqput ¶
func runqput(_p_ *p, gp *g, next bool)
runqput tries to put g on the local runnable queue. If next is false, runqput adds g to the tail of the runnable queue. If next is true, runqput puts g in the _p_.runnext slot. If the run queue is full, runqput puts g on the global queue. Executed only by the owner P.
func runqputbatch ¶
func runqputbatch(pp *p, q *gQueue, qsize int)
runqputbatch tries to put all the G's on q on the local runnable queue. If the queue is full, they are put on the global queue; in that case this will temporarily acquire the scheduler lock. Executed only by the owner P.
func runqputslow ¶
func runqputslow(_p_ *p, gp *g, h, t uint32) bool
Put g and a batch of work from local runnable queue on global queue. Executed only by the owner P.
func runtime_debug_WriteHeapDump ¶
func runtime_debug_WriteHeapDump(fd uintptr)
func runtime_debug_freeOSMemory ¶
func runtime_debug_freeOSMemory()
func runtime_expandFinalInlineFrame ¶
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr
runtime_expandFinalInlineFrame expands the final pc in stk to include all "callers" if pc is inline.
func runtime_getProfLabel ¶
func runtime_getProfLabel() unsafe.Pointer
func runtime_goroutineProfileWithLabels ¶
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool)
func runtime_pprof_readProfile ¶
func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool)
readProfile, provided to runtime/pprof, returns the next chunk of binary CPU profiling stack trace data, blocking until data is available. If profiling is turned off and all the profile data accumulated while it was on has been returned, readProfile returns eof=true. The caller must save the returned data and tags before calling readProfile again.
func runtime_pprof_runtime_cyclesPerSecond ¶
func runtime_pprof_runtime_cyclesPerSecond() int64
func runtime_setProfLabel ¶
func runtime_setProfLabel(labels unsafe.Pointer)
func runtimer ¶
func runtimer(pp *p, now int64) int64
runtimer examines the first timer in timers. If it is ready based on now, it runs the timer and removes or updates it. Returns 0 if it ran a timer, -1 if there are no more timers, or the time when the first timer should run. The caller must have locked the timers for pp. If a timer is run, this will temporarily unlock the timers.
func save ¶
func save(pc, sp uintptr)
save updates getg().sched to refer to pc and sp so that a following gogo will restore pc and sp.
save must not have write barriers because invoking a write barrier can clobber getg().sched.
func saveAncestors ¶
func saveAncestors(callergp *g) *[]ancestorInfo
saveAncestors copies previous ancestors of the given caller g and includes info for the current caller into a new set of tracebacks for a g being created.
func saveblockevent ¶
func saveblockevent(cycles int64, skip int, which bucketType)
func saveg ¶
func saveg(pc, sp uintptr, gp *g, r *StackRecord)
func sbrk0 ¶
func sbrk0() uintptr
func scanConservative ¶
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState)
scanConservative scans block [b, b+n) conservatively, treating any pointer-like value in the block as a pointer.
If ptrmask != nil, only words that are marked in ptrmask are considered as potential pointers.
If state != nil, it's assumed that [b, b+n) is a block in the stack and may contain pointers to stack objects.
func scanblock ¶
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
scanblock scans b as scanobject would, but using an explicit pointer bitmap instead of the heap bitmap.
This is used to scan non-heap roots, so it does not update gcw.bytesMarked or gcw.scanWork.
If stk != nil, possible stack pointers are also reported to stk.putPtr.
func scanframeworker ¶
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork)
Scan a stack frame: local variables and function arguments/results.
func scanobject ¶
func scanobject(b uintptr, gcw *gcWork)
scanobject scans the object starting at b, adding pointers to gcw. b must point to the beginning of a heap object or an oblet. scanobject consults the GC bitmap for the pointer mask and the spans for the size of the object.
func scanstack ¶
func scanstack(gp *g, gcw *gcWork)
scanstack scans gp's stack, greying all pointers found on the stack.
scanstack will also shrink the stack if it is safe to do so. If it is not, it schedules a stack shrink for the next synchronous safe point.
scanstack is marked go:systemstack because it must not be preempted while using a workbuf.
func scavengeSleep ¶
func scavengeSleep(ns int64) int64
scavengeSleep attempts to put the scavenger to sleep for ns.
Note that this function should only be called by the scavenger.
The scavenger may be woken up earlier by a pacing change, and it may not go to sleep at all if there's a pending pacing change.
Returns the amount of time actually slept.
func schedEnableUser ¶
func schedEnableUser(enable bool)
schedEnableUser enables or disables the scheduling of user goroutines.
This does not stop already running user goroutines, so the caller should first stop the world when disabling user goroutines.
func schedEnabled ¶
func schedEnabled(gp *g) bool
schedEnabled reports whether gp should be scheduled. It returns false if scheduling of gp is disabled.
func sched_getaffinity ¶
func sched_getaffinity(pid, len uintptr, buf *byte) int32
func schedinit ¶
func schedinit()
The bootstrap sequence is:
call osinit
call schedinit
make & queue new G
call runtime·mstart
The new G calls runtime·main.
func schedtrace ¶
func schedtrace(detailed bool)
func schedule ¶
func schedule()
One round of scheduler: find a runnable goroutine and execute it. Never returns.
func selectgo ¶
func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool)
selectgo implements the select statement.
cas0 points to an array of type [ncases]scase, and order0 points to an array of type [2*ncases]uint16 where ncases must be <= 65536. Both reside on the goroutine's stack (regardless of any escaping in selectgo).
selectgo returns the index of the chosen scase, which matches the ordinal position of its respective select{recv,send,default} call. Also, if the chosen scase was a receive operation, it reports whether a value was received.
func selectnbrecv ¶
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool)
compiler implements
select { case v = <-c: ... foo default: ... bar }
as
if selectnbrecv(&v, c) { ... foo } else { ... bar }
func selectnbrecv2 ¶
func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool)
compiler implements
select { case v, ok = <-c: ... foo default: ... bar }
as
if c != nil && selectnbrecv2(&v, &ok, c) { ... foo } else { ... bar }
func selectnbsend ¶
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool)
compiler implements
select { case c <- v: ... foo default: ... bar }
as
if selectnbsend(c, v) { ... foo } else { ... bar }
func selectsetpc ¶
func selectsetpc(cas *scase)
func sellock ¶
func sellock(scases []scase, lockorder []uint16)
func selparkcommit ¶
func selparkcommit(gp *g, _ unsafe.Pointer) bool
func selunlock ¶
func selunlock(scases []scase, lockorder []uint16)
func semacquire ¶
func semacquire(addr *uint32)
Called from runtime.
func semacquire1 ¶
func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int)
func semrelease ¶
func semrelease(addr *uint32)
func semrelease1 ¶
func semrelease1(addr *uint32, handoff bool, skipframes int)
func send ¶
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int)
send processes a send operation on an empty channel c. The value ep sent by the sender is copied to the receiver sg. The receiver is then woken up to go on its merry way. Channel c must be empty and locked. send unlocks c with unlockf. sg must already be dequeued from c. ep must be non-nil and point to the heap or the caller's stack.
func sendDirect ¶
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer)
func setGCPercent ¶
func setGCPercent(in int32) (out int32)
func setGCPhase ¶
func setGCPhase(x uint32)
func setGNoWB ¶
func setGNoWB(gp **g, new *g)
setGNoWB performs *gp = new without a write barrier. For times when it's impractical to use a guintptr.
func setGsignalStack ¶
func setGsignalStack(st *stackt, old *gsignalStack)
setGsignalStack sets the gsignal stack of the current m to an alternate signal stack returned from the sigaltstack system call. It saves the old values in *old for use by restoreGsignalStack. This is used when handling a signal if non-Go code has set the alternate signal stack.
func setMNoWB ¶
func setMNoWB(mp **m, new *m)
setMNoWB performs *mp = new without a write barrier. For times when it's impractical to use an muintptr.
func setMaxStack ¶
func setMaxStack(in int) (out int)
func setMaxThreads ¶
func setMaxThreads(in int) (out int)
func setNonblock ¶
func setNonblock(fd int32)
func setPanicOnFault ¶
func setPanicOnFault(new bool) (old bool)
func setProcessCPUProfiler ¶
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler is called when the profiling timer changes. It is called with prof.lock held. hz is the new timer, and is 0 if profiling is being disabled. Enable or disable the signal as required for -buildmode=c-archive.
func setSignalstackSP ¶
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP sets the ss_sp field of a stackt.
func setThreadCPUProfiler ¶
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler makes any thread-specific changes required to implement profiling at a rate of hz.
func setTraceback ¶
func setTraceback(level string)
func setcpuprofilerate ¶
func setcpuprofilerate(hz int32)
setcpuprofilerate sets the CPU profiling rate to hz times per second. If hz <= 0, setcpuprofilerate turns off CPU profiling.
func setg ¶
func setg(gg *g)
func setitimer ¶
func setitimer(mode int32, new, old *itimerval)
func setprofilebucket ¶
func setprofilebucket(p unsafe.Pointer, b *bucket)
Set the heap profile bucket associated with addr to b.
func setsSP ¶
func setsSP(pc uintptr) bool
Reports whether a function will set the SP to an absolute value. Important that we don't traceback when these are at the bottom of the stack since we can't be sure that we will find the caller.
If the function is not on the bottom of the stack we assume that it will have set it up so that traceback will be consistent, either by being a traceback terminating function or putting one on the stack at the right offset.
func setsig ¶
func setsig(i uint32, fn uintptr)
func setsigsegv ¶
func setsigsegv(pc uintptr)
setsigsegv is used on darwin/arm64 to fake a segmentation fault.
This is exported via linkname to assembly in runtime/cgo.
func setsigstack ¶
func setsigstack(i uint32)
func settls ¶
func settls()
Called from assembly only; declared for go vet.
func shade ¶
func shade(b uintptr)
Shade the object if it isn't already. The object is not nil and known to be in the heap. Preemption must be disabled.
func shouldPushSigpanic ¶
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool
shouldPushSigpanic reports whether pc should be used as sigpanic's return PC (pushing a frame for the call). Otherwise, it should be left alone so that LR is used as sigpanic's return PC, effectively replacing the top-most frame with sigpanic. This is used by preparePanic.
func shouldStealTimers ¶
func shouldStealTimers(p2 *p) bool
shouldStealTimers reports whether we should try stealing the timers from p2. We don't steal timers from a running P that is not marked for preemption, on the assumption that it will run its own timers. This reduces contention on the timers lock.
func showframe ¶
func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool
showframe reports whether the frame with the given characteristics should be printed during a traceback.
func showfuncinfo ¶
func showfuncinfo(f funcInfo, firstFrame bool, funcID, childID funcID) bool
showfuncinfo reports whether a function with the given characteristics should be printed during a traceback.
func shrinkstack ¶
func shrinkstack(gp *g)
Maybe shrink the stack being used by gp.
gp must be stopped and we must own its stack. It may be in _Grunning, but only if this is our own user G.
func siftdownTimer ¶
func siftdownTimer(t []*timer, i int)
func siftupTimer ¶
func siftupTimer(t []*timer, i int)
func sigInitIgnored ¶
func sigInitIgnored(s uint32)
sigInitIgnored marks the signal as already ignored. This is called at program start by initsig. In a shared library initsig is called by libpreinit, so the runtime may not be initialized yet.
func sigInstallGoHandler ¶
func sigInstallGoHandler(sig uint32) bool
func sigNotOnStack ¶
func sigNotOnStack(sig uint32)
This is called if we receive a signal when there is a signal stack but we are not on it. This can only happen if non-Go code called sigaction without setting the SA_ONSTACK flag.
func sigNoteSetup ¶
func sigNoteSetup(*note)
func sigNoteSleep ¶
func sigNoteSleep(*note)
func sigNoteWakeup ¶
func sigNoteWakeup(*note)
func sigaction ¶
func sigaction(sig uint32, new, old *sigactiont)
func sigaddset ¶
func sigaddset(mask *sigset, i int)
func sigaltstack ¶
func sigaltstack(new, old *stackt)
func sigblock ¶
func sigblock()
sigblock blocks all signals in the current thread's signal mask. This is used to block signals while setting up and tearing down g when a non-Go thread calls a Go function. The OS-specific code is expected to define sigset_all. This is nosplit and nowritebarrierrec because it is called by needm which may be called on a non-Go thread with no g available.
func sigdelset ¶
func sigdelset(mask *sigset, i int)
func sigdisable ¶
func sigdisable(sig uint32)
sigdisable disables the Go signal handler for the signal sig. It is only called while holding the os/signal.handlers lock, via os/signal.disableSignal and signal_disable.
func sigenable ¶
func sigenable(sig uint32)
sigenable enables the Go signal handler to catch the signal sig. It is only called while holding the os/signal.handlers lock, via os/signal.enableSignal and signal_enable.
func sigfillset ¶
func sigfillset(mask *uint64)
func sigfwd ¶
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
func sigfwdgo ¶
func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool
Determines if the signal should be handled by Go and if not, forwards the signal to the handler that was installed before Go's. Returns whether the signal was forwarded. This is called by the signal handler, and the world may be stopped.
func sighandler ¶
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g)
sighandler is invoked when a signal occurs. The global g will be set to a gsignal goroutine and we will be running on the alternate signal stack. The parameter g will be the value of the global g when the signal occurred. The sig, info, and ctxt parameters are from the system signal handler: they are the parameters the kernel passes to the handler that was registered with the sigaction system call.
The garbage collector may have stopped the world, so write barriers are not allowed.
func sigignore ¶
func sigignore(sig uint32)
sigignore ignores the signal sig. It is only called while holding the os/signal.handlers lock, via os/signal.ignoreSignal and signal_ignore.
func signalDuringFork ¶
func signalDuringFork(sig uint32)
signalDuringFork is called if we receive a signal while doing a fork. We do not want signals at that time, as a signal sent to the process group may be delivered to the child process, causing confusion. This should never be called, because we block signals across the fork; this function is just a safety check. See issue 18600 for background.
func signalM ¶
func signalM(mp *m, sig int)
signalM sends a signal to mp.
func signalWaitUntilIdle ¶
func signalWaitUntilIdle()
signalWaitUntilIdle waits until the signal delivery mechanism is idle. This is used to ensure that we do not drop a signal notification due to a race between disabling a signal and receiving a signal. This assumes that signal delivery has already been disabled for the signal(s) in question, and here we are just waiting to make sure that all the signals have been delivered to the user channels by the os/signal package.
func signal_disable ¶
func signal_disable(s uint32)
Must only be called from a single goroutine at a time.
func signal_enable ¶
func signal_enable(s uint32)
Must only be called from a single goroutine at a time.
func signal_ignore ¶
func signal_ignore(s uint32)
Must only be called from a single goroutine at a time.
func signal_ignored ¶
func signal_ignored(s uint32) bool
Checked by signal handlers.
func signal_recv ¶
func signal_recv() uint32
Called to receive the next queued signal. Must only be called from a single goroutine at a time.
func signalstack ¶
func signalstack(s *stack)
signalstack sets the current thread's alternate signal stack to s.
func signame ¶
func signame(sig uint32) string
func sigpanic ¶
func sigpanic()
sigpanic turns a synchronous signal into a run-time panic. If the signal handler sees a synchronous panic, it arranges the stack to look like the function where the signal occurred called sigpanic, sets the signal's PC value to sigpanic, and returns from the signal handler. The effect is that the program will act as though the function that got the signal simply called sigpanic instead.
This must NOT be nosplit because the linker doesn't know where sigpanic calls can be injected.
The signal handler must not inject a call to sigpanic if getg().throwsplit, since sigpanic may need to grow the stack.
This is exported via linkname to assembly in runtime/cgo.
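For illustration only, here is a small user-level program (not runtime code) showing the observable effect of this mechanism: a nil-pointer dereference raises a synchronous SIGSEGV, which the runtime converts into an ordinary, recoverable run-time panic. The exact message text may vary by release.

    package main

    import "fmt"

    func main() {
        defer func() {
            if r := recover(); r != nil {
                // Typically prints something like:
                // "recovered: runtime error: invalid memory address or nil pointer dereference"
                fmt.Println("recovered:", r)
            }
        }()
        var p *int
        _ = *p // synchronous fault; surfaced as a panic via sigpanic
    }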
func sigpipe ¶
func sigpipe()
func sigprocmask ¶
func sigprocmask(how int32, new, old *sigset)
func sigprof ¶
func sigprof(pc, sp, lr uintptr, gp *g, mp *m)
Called if we receive a SIGPROF signal. Called by the signal handler, may run during STW.
func sigprofNonGo ¶
func sigprofNonGo()
sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, and the signal handler collected a stack trace in sigprofCallers. When this is called, sigprofCallersUse will be non-zero. g is nil, and what we can do is very limited.
func sigprofNonGoPC ¶
func sigprofNonGoPC(pc uintptr)
sigprofNonGoPC is called when a profiling signal arrived on a non-Go thread and we have a single PC value, not a stack trace. g is nil, and what we can do is very limited.
func sigreturn ¶
func sigreturn()
func sigsave ¶
func sigsave(p *sigset)
sigsave saves the current thread's signal mask into *p. This is used to preserve the non-Go signal mask when a non-Go thread calls a Go function. This is nosplit and nowritebarrierrec because it is called by needm which may be called on a non-Go thread with no g available.
func sigsend ¶
func sigsend(s uint32) bool
sigsend delivers a signal from sighandler to the internal signal delivery queue. It reports whether the signal was sent. If not, the caller typically crashes the program. It runs from the signal handler, so it's limited in what it can do.
func sigtramp ¶
func sigtramp(sig uint32, info *siginfo, ctx unsafe.Pointer)
func sigtrampgo ¶
func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer)
sigtrampgo is called from the signal handler function, sigtramp, written in assembly code. This is called by the signal handler, and the world may be stopped.
It must be nosplit because getg() is still the G that was running (if any) when the signal was delivered, but it's (usually) called on the gsignal stack. Until this switches the G to gsignal, the stack bounds check won't work.
func skipPleaseUseCallersFrames ¶
func skipPleaseUseCallersFrames()
This function is defined in asm.s to be sizeofSkipFunction bytes long.
func slicebytetostring ¶
func slicebytetostring(buf *tmpBuf, ptr *byte, n int) (str string)
slicebytetostring converts a byte slice to a string. It is inserted by the compiler into generated code. ptr is a pointer to the first element of the slice; n is the length of the slice. buf is a fixed-size buffer for the result; it is not nil if the result does not escape.
func slicebytetostringtmp ¶
func slicebytetostringtmp(ptr *byte, n int) (str string)
slicebytetostringtmp returns a "string" referring to the actual []byte bytes.
Callers need to ensure that the returned string will not be used after the calling goroutine modifies the original slice or synchronizes with another goroutine.
The function is only called when instrumenting and otherwise intrinsified by the compiler.
Some internal compiler optimizations use this function; see the example after this list.
- Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)], where k is []byte and T1 to Tn is a nesting of struct and array literals.
- Used for "<"+string(b)+">" concatenation where b is []byte.
- Used for string(b)=="foo" comparison where b is []byte.
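The following user-level snippet shows the source patterns listed above. It is illustrative only: whether the compiler actually applies the non-allocating conversion depends on the compiler version and escape analysis.

    package main

    import "fmt"

    func main() {
        b := []byte("foo")
        m := map[string]int{"foo": 1}

        // Map index keyed by string(k), k a []byte.
        _ = m[string(b)]

        // Concatenation where one operand is string(b).
        fmt.Println("<" + string(b) + ">")

        // Comparison of string(b) against a string constant.
        if string(b) == "foo" {
            fmt.Println("equal")
        }
    }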
func slicecopy ¶
func slicecopy(toPtr unsafe.Pointer, toLen int, fmPtr unsafe.Pointer, fmLen int, width uintptr) int
func slicerunetostring ¶
func slicerunetostring(buf *tmpBuf, a []rune) string
func slicestringcopy ¶
func slicestringcopy(toPtr *byte, toLen int, fm string) int
func socket ¶
func socket(domain int32, typ int32, prot int32) int32
func spanHasNoSpecials ¶
func spanHasNoSpecials(s *mspan)
spanHasNoSpecials marks a span as having no specials in the arena bitmap.
func spanHasSpecials ¶
func spanHasSpecials(s *mspan)
spanHasSpecials marks a span as having specials in the arena bitmap.
func stackcache_clear ¶
func stackcache_clear(c *mcache)
func stackcacherefill ¶
func stackcacherefill(c *mcache, order uint8)
stackcacherefill/stackcacherelease implement a global pool of stack segments. The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherelease ¶
func stackcacherelease(c *mcache, order uint8)
func stackcheck ¶
func stackcheck()
stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).
func stackfree ¶
func stackfree(stk stack)
stackfree frees an n byte stack allocation at stk.
stackfree must run on the system stack because it uses per-P resources and must not split the stack.
func stackinit ¶
func stackinit()
func stacklog2 ¶
func stacklog2(n uintptr) int
stacklog2 returns ⌊log_2(n)⌋.
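A minimal stand-alone sketch of such a floor-log2 helper (an assumed shape for illustration, not necessarily the runtime's exact code):

    // log2floor returns ⌊log₂(n)⌋ for n > 1 (and 0 for n == 0 or 1),
    // by counting how many times n can be halved.
    func log2floor(n uintptr) int {
        log2 := 0
        for n > 1 {
            n >>= 1
            log2++
        }
        return log2
    }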
func stackpoolfree ¶
func stackpoolfree(x gclinkptr, order uint8)
Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func startTemplateThread ¶
func startTemplateThread()
startTemplateThread starts the template thread if it is not already running.
The calling thread must itself be in a known-good state.
func startTheWorld ¶
func startTheWorld()
startTheWorld undoes the effects of stopTheWorld.
func startTheWorldGC ¶
func startTheWorldGC()
startTheWorldGC undoes the effects of stopTheWorldGC.
func startTheWorldWithSema ¶
func startTheWorldWithSema(emitTraceEvent bool) int64
func startTimer ¶
func startTimer(t *timer)
startTimer adds t to the timer heap.
func startlockedm ¶
func startlockedm(gp *g)
Schedules the locked m to run the locked gp. May run during STW, so write barriers are not allowed.
func startm ¶
func startm(_p_ *p, spinning bool)
Schedules some M to run the p (creates an M if necessary). If p==nil, it tries to get an idle P; if there are no idle P's, it does nothing. May run with m.p==nil, so write barriers are not allowed. If spinning is set, the caller has incremented nmspinning and startm will either decrement nmspinning or set m.spinning in the newly started M.
func startpanic_m ¶
func startpanic_m() bool
startpanic_m prepares for an unrecoverable panic.
It returns true if panic messages should be printed, or false if the runtime is in bad shape and should just print stacks.
It must not have write barriers even though the write barrier explicitly ignores writes once dying > 0. Write barriers still assume that g.m.p != nil, and this function may not have P in some contexts (e.g. a panic in a signal handler for a signal sent to an M with no P).
func step ¶
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool)
step advances to the next pc, value pair in the encoded table.
func stopTheWorld ¶
func stopTheWorld(reason string)
stopTheWorld stops all P's from executing goroutines, interrupting all goroutines at GC safe points, and records reason as the reason for the stop. On return, only the current goroutine's P is running. stopTheWorld must not be called from a system stack and the caller must not hold worldsema. The caller must call startTheWorld when other P's should resume execution.
stopTheWorld is safe for multiple goroutines to call at the same time. Each will execute its own stop, and the stops will be serialized.
This is also used by routines that do stack dumps. If the system is in panic or being exited, this may not reliably stop all goroutines.
func stopTheWorldGC ¶
func stopTheWorldGC(reason string)
stopTheWorldGC has the same effect as stopTheWorld, but blocks until the GC is not running. It also blocks a GC from starting until startTheWorldGC is called.
func stopTheWorldWithSema ¶
func stopTheWorldWithSema()
stopTheWorldWithSema is the core implementation of stopTheWorld. The caller is responsible for acquiring worldsema and disabling preemption first, and should then call stopTheWorldWithSema on the system stack:
    semacquire(&worldsema, 0)
    m.preemptoff = "reason"
    systemstack(stopTheWorldWithSema)
When finished, the caller must either call startTheWorld or undo these three operations separately:
    m.preemptoff = ""
    systemstack(startTheWorldWithSema)
    semrelease(&worldsema)
It is allowed to acquire worldsema once and then execute multiple startTheWorldWithSema/stopTheWorldWithSema pairs. Other P's are able to execute between successive calls to startTheWorldWithSema and stopTheWorldWithSema. Holding worldsema causes any other goroutines invoking stopTheWorld to block.
func stopTimer ¶
func stopTimer(t *timer) bool
stopTimer stops a timer. It reports whether t was stopped before being run.
func stoplockedm ¶
func stoplockedm()
Stops execution of the current m that is locked to a g until the g is runnable again. Returns with acquired P.
func stopm ¶
func stopm()
Stops execution of the current m until new work is available. Returns with acquired P.
func strequal ¶
func strequal(p, q unsafe.Pointer) bool
func strhash ¶
func strhash(p unsafe.Pointer, h uintptr) uintptr
func strhashFallback ¶
func strhashFallback(a unsafe.Pointer, h uintptr) uintptr
func stringDataOnStack ¶
func stringDataOnStack(s string) bool
stringDataOnStack reports whether the string's data is stored on the current goroutine's stack.
func stringHash ¶
func stringHash(s string, seed uintptr) uintptr
Testing adapters for hash quality tests (see hash_test.go)
func stringtoslicebyte ¶
func stringtoslicebyte(buf *tmpBuf, s string) []byte
func stringtoslicerune ¶
func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune
func subtract1 ¶
func subtract1(p *byte) *byte
subtract1 returns the byte pointer p-1.
nosplit because it is used during write barriers and must not be preempted.
func subtractb ¶
func subtractb(p *byte, n uintptr) *byte
subtractb returns the byte pointer p-n.
func sweepone ¶
func sweepone() uintptr
sweepone sweeps some unswept heap span and returns the number of pages returned to the heap, or ^uintptr(0) if there was nothing to sweep.
func sync_atomic_CompareAndSwapPointer ¶
func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
func sync_atomic_CompareAndSwapUintptr ¶
func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
func sync_atomic_StorePointer ¶
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer)
func sync_atomic_StoreUintptr ¶
func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
func sync_atomic_SwapPointer ¶
func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
func sync_atomic_SwapUintptr ¶
func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
func sync_atomic_runtime_procPin ¶
func sync_atomic_runtime_procPin() int
func sync_atomic_runtime_procUnpin ¶
func sync_atomic_runtime_procUnpin()
func sync_fastrand ¶
func sync_fastrand() uint32
func sync_nanotime ¶
func sync_nanotime() int64
func sync_runtime_Semacquire ¶
func sync_runtime_Semacquire(addr *uint32)
func sync_runtime_SemacquireMutex ¶
func sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int)
func sync_runtime_Semrelease ¶
func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int)
func sync_runtime_canSpin ¶
func sync_runtime_canSpin(i int) bool
Active spinning for sync.Mutex.
func sync_runtime_doSpin ¶
func sync_runtime_doSpin()
func sync_runtime_procPin ¶
func sync_runtime_procPin() int
func sync_runtime_procUnpin ¶
func sync_runtime_procUnpin()
func sync_runtime_registerPoolCleanup ¶
func sync_runtime_registerPoolCleanup(f func())
func sync_throw ¶
func sync_throw(s string)
func syncadjustsudogs ¶
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr
syncadjustsudogs adjusts gp's sudogs and copies the part of gp's stack they refer to while synchronizing with concurrent channel operations. It returns the number of bytes of stack copied.
func sysAlloc ¶
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
Don't split the stack as this method may be invoked without a valid G, which prevents us from allocating more stack.
func sysFault ¶
func sysFault(v unsafe.Pointer, n uintptr)
func sysFree ¶
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64)
Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack.
func sysHugePage ¶
func sysHugePage(v unsafe.Pointer, n uintptr)
func sysMap ¶
func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64)
func sysMmap ¶
func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
sysMmap calls the mmap system call. It is implemented in assembly.
func sysMunmap ¶
func sysMunmap(addr unsafe.Pointer, n uintptr)
sysMunmap calls the munmap system call. It is implemented in assembly.
func sysReserve ¶
func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer
func sysReserveAligned ¶
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr)
sysReserveAligned is like sysReserve, but the returned pointer is aligned to align bytes. It may reserve either n or n+align bytes, so it returns the size that was reserved.
func sysSigaction ¶
func sysSigaction(sig uint32, new, old *sigactiont)
sysSigaction calls the rt_sigaction system call.
func sysUnused ¶
func sysUnused(v unsafe.Pointer, n uintptr)
func sysUsed ¶
func sysUsed(v unsafe.Pointer, n uintptr)
func sysargs ¶
func sysargs(argc int32, argv **byte)
func sysauxv ¶
func sysauxv(auxv []uintptr) int
func syscall_Exit ¶
func syscall_Exit(code int)
func syscall_Getpagesize ¶
func syscall_Getpagesize() int
func syscall_runtime_AfterExec ¶
func syscall_runtime_AfterExec()
Called from syscall package after Exec.
func syscall_runtime_AfterFork ¶
func syscall_runtime_AfterFork()
Called from syscall package after fork in parent.
func syscall_runtime_AfterForkInChild ¶
func syscall_runtime_AfterForkInChild()
Called from syscall package after fork in child. It resets non-sigignored signals to the default handler, and restores the signal mask in preparation for the exec.
Because this might be called during a vfork, and therefore may be temporarily sharing address space with the parent process, this must not change any global variables or call into C code that may do so.
func syscall_runtime_BeforeExec ¶
func syscall_runtime_BeforeExec()
Called from syscall package before Exec.
func syscall_runtime_BeforeFork ¶
func syscall_runtime_BeforeFork()
Called from syscall package before fork.
func syscall_runtime_envs ¶
func syscall_runtime_envs() []string
func syscall_setenv_c ¶
func syscall_setenv_c(k string, v string)
Update the C environment if cgo is loaded. Called from syscall.Setenv.
func syscall_unsetenv_c ¶
func syscall_unsetenv_c(k string)
Update the C environment if cgo is loaded. Called from syscall.unsetenv.
func sysmon ¶
func sysmon()
Always runs without a P, so write barriers are not allowed.
func systemstack ¶
func systemstack(fn func())
systemstack runs fn on a system stack. If systemstack is called from the per-OS-thread (g0) stack, or if systemstack is called from the signal handling (gsignal) stack, systemstack calls fn directly and returns. Otherwise, systemstack is being called from the limited stack of an ordinary goroutine. In this case, systemstack switches to the per-OS-thread stack, calls fn, and switches back. It is common to use a func literal as the argument, in order to share inputs and outputs with the code around the call to systemstack:
    ... set up y ...
    systemstack(func() {
        x = bigcall(y)
    })
    ... use x ...
func systemstack_switch ¶
func systemstack_switch()
func templateThread ¶
func templateThread()
templateThread is a thread in a known-good state that exists solely to start new threads in known-good states when the calling thread may not be in a good state.
Many programs never need this, so templateThread is started lazily when we first enter a state that might lead to running on a thread in an unknown state.
templateThread runs on an M without a P, so it must not have write barriers.
func testAtomic64 ¶
func testAtomic64()
func testdefersizes ¶
func testdefersizes()
Ensure that defer arg sizes that map to the same defer size class also map to the same malloc size class.
func tgkill ¶
func tgkill(tgid, tid, sig int)
func throw ¶
func throw(s string)
func throwBadKernel ¶
func throwBadKernel()
throwBadKernel is called, via throwReportQuirk, by throw.
func tickspersecond ¶
func tickspersecond() int64
Note: Called by runtime/pprof in addition to runtime code.
func timeSleep ¶
func timeSleep(ns int64)
timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
func timeSleepUntil ¶
func timeSleepUntil() (int64, *p)
timeSleepUntil returns the time when the next timer should fire, and the P that holds the timer heap that that timer is on. This is only called by sysmon and checkdead.
func time_now ¶
func time_now() (sec int64, nsec int32, mono int64)
func timediv ¶
func timediv(v int64, div int32, rem *int32) int32
Poor man's 64-bit division. This is a very special function; do not use it if you are not sure what you are doing. int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions. Handles overflow in a time-specific manner. This keeps us within no-split stack limits on 32-bit processors.
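To illustrate the general technique, here is a hedged sketch of shift-and-subtract division that avoids 64-bit divide instructions. It assumes v >= 0 and div > 0 and is not necessarily identical to the runtime's code.

    // div64by32 divides a non-negative 64-bit value by a positive 32-bit
    // divisor using shift-and-subtract, saturating if the quotient does
    // not fit in an int32.
    func div64by32(v int64, div int32, rem *int32) int32 {
        res := int32(0)
        for bit := 30; bit >= 0; bit-- {
            if v >= int64(div)<<uint(bit) {
                v -= int64(div) << uint(bit)
                res |= 1 << uint(bit)
            }
        }
        if v >= int64(div) {
            if rem != nil {
                *rem = 0
            }
            return 0x7fffffff // quotient overflowed int32
        }
        if rem != nil {
            *rem = int32(v)
        }
        return res
    }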
func tooManyOverflowBuckets ¶
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool
tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets. Note that most of these overflow buckets must be in sparse use; if use was dense, then we'd have already triggered regular map growth.
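A sketch of the kind of threshold check this describes (an assumed shape for illustration; the constants and exact condition may differ from the runtime source):

    // hasTooManyOverflow reports whether noverflow is too large for a table
    // of 1<<B buckets. The threshold is capped at 1<<15 so that very large
    // maps do not require an unreasonable number of overflow buckets to
    // trigger growth.
    func hasTooManyOverflow(noverflow uint16, B uint8) bool {
        if B > 15 {
            B = 15
        }
        // Roughly one overflow bucket per regular bucket.
        return noverflow >= uint16(1)<<(B&15)
    }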
func tophash ¶
func tophash(hash uintptr) uint8
tophash calculates the tophash value for hash.
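A simplified sketch of how such a top-byte extraction can work. The real function also reserves a few small values as bucket-evacuation markers; the constants here (ptrBits, minTopHash) are assumptions for illustration.

    const (
        ptrBits    = 64 // assume a 64-bit platform for this sketch
        minTopHash = 5  // assumed: values below this are reserved for markers
    )

    // topByte extracts the top byte of a hash and bumps it past the
    // reserved marker range.
    func topByte(hash uintptr) uint8 {
        top := uint8(hash >> (ptrBits - 8))
        if top < minTopHash {
            top += minTopHash
        }
        return top
    }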
func topofstack ¶
func topofstack(f funcInfo, g0 bool) bool
Does f mark the top of a goroutine stack?
func totaldefersize ¶
func totaldefersize(siz uintptr) uintptr
total size of the memory block for a defer with arg size siz
func traceAcquireBuffer ¶
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr)
traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAppend ¶
func traceAppend(buf []byte, v uint64) []byte
traceAppend appends v to buf in little-endian-base-128 encoding.
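Little-endian base-128 (varint) encoding stores seven bits per byte, low bits first, with the high bit marking continuation. A self-contained sketch of such an append (illustrative; not copied from the runtime):

    // appendVarint appends v to buf in little-endian base-128 form: the low
    // 7 bits come first and every byte except the last has its high bit set.
    func appendVarint(buf []byte, v uint64) []byte {
        for ; v >= 0x80; v >>= 7 {
            buf = append(buf, 0x80|byte(v))
        }
        return append(buf, byte(v))
    }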
func traceEvent ¶
func traceEvent(ev byte, skip int, args ...uint64)
traceEvent writes a single event to the trace buffer, flushing the buffer if necessary. ev is the event type. If skip > 0, write the current stack id as the last argument (skipping skip top frames). If skip = 0, this event type should contain a stack, but we don't want to collect and remember it for this particular call.
func traceEventLocked ¶
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64)
func traceFrameForPC ¶
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr)
traceFrameForPC records the frame information. It may allocate memory.
func traceFullQueue ¶
func traceFullQueue(buf traceBufPtr)
traceFullQueue queues buf into the queue of full buffers.
func traceGCDone ¶
func traceGCDone()
func traceGCMarkAssistDone ¶
func traceGCMarkAssistDone()
func traceGCMarkAssistStart ¶
func traceGCMarkAssistStart()
func traceGCSTWDone ¶
func traceGCSTWDone()
func traceGCSTWStart ¶
func traceGCSTWStart(kind int)
func traceGCStart ¶
func traceGCStart()
func traceGCSweepDone ¶
func traceGCSweepDone()
func traceGCSweepSpan ¶
func traceGCSweepSpan(bytesSwept uintptr)
traceGCSweepSpan traces the sweep of a single page.
This may be called outside a traceGCSweepStart/traceGCSweepDone pair; however, it will not emit any trace events in this case.
func traceGCSweepStart ¶
func traceGCSweepStart()
traceGCSweepStart prepares to trace a sweep loop. This does not emit any events until traceGCSweepSpan is called.
traceGCSweepStart must be paired with traceGCSweepDone and there must be no preemption points between these two calls.
func traceGoCreate ¶
func traceGoCreate(newg *g, pc uintptr)
func traceGoEnd ¶
func traceGoEnd()
func traceGoPark ¶
func traceGoPark(traceEv byte, skip int)
func traceGoPreempt ¶
func traceGoPreempt()
func traceGoSched ¶
func traceGoSched()
func traceGoStart ¶
func traceGoStart()
func traceGoSysBlock ¶
func traceGoSysBlock(pp *p)
func traceGoSysCall ¶
func traceGoSysCall()
func traceGoSysExit ¶
func traceGoSysExit(ts int64)
func traceGoUnpark ¶
func traceGoUnpark(gp *g, skip int)
func traceGomaxprocs ¶
func traceGomaxprocs(procs int32)
func traceHeapAlloc ¶
func traceHeapAlloc()
func traceNextGC ¶
func traceNextGC()
func traceProcFree ¶
func traceProcFree(pp *p)
traceProcFree frees trace buffer associated with pp.
func traceProcStart ¶
func traceProcStart()
func traceProcStop ¶
func traceProcStop(pp *p)
func traceReleaseBuffer ¶
func traceReleaseBuffer(pid int32)
traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceStackID ¶
func traceStackID(mp *m, buf []uintptr, skip int) uint64
func traceString ¶
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr)
traceString adds a string to the trace.strings and returns the id.
func trace_userLog ¶
func trace_userLog(id uint64, category, message string)
func trace_userRegion ¶
func trace_userRegion(id, mode uint64, name string)
func trace_userTaskCreate ¶
func trace_userTaskCreate(id, parentID uint64, taskType string)
func trace_userTaskEnd ¶
func trace_userTaskEnd(id uint64)
func tracealloc ¶
func tracealloc(p unsafe.Pointer, size uintptr, typ *_type)
func traceback ¶
func traceback(pc, sp, lr uintptr, gp *g)
func traceback1 ¶
func traceback1(pc, sp, lr uintptr, gp *g, flags uint)
func tracebackCgoContext ¶
func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int
tracebackCgoContext handles tracing back a cgo context value, from the context argument to setCgoTraceback, for the gentraceback function. It returns the new value of n.
func tracebackHexdump ¶
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr)
tracebackHexdump hexdumps part of stk around frame.sp and frame.fp for debugging purposes. If the address bad is included in the hexdumped range, it will mark it as well.
func tracebackdefers ¶
func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer)
Traceback over the deferred function calls. Report them like calls that have been invoked but not started executing yet.
func tracebackinit ¶
func tracebackinit()
func tracebackothers ¶
func tracebackothers(me *g)
func tracebacktrap ¶
func tracebacktrap(pc, sp, lr uintptr, gp *g)
tracebacktrap is like traceback but expects that the PC and SP were obtained from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp. Because they are from a trap instead of from a saved pair, the initial PC must not be rewound to the previous instruction. (All the saved pairs record a PC that is a return address, so we rewind it into the CALL instruction.) If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to the pc/sp/lr passed in.
func tracefree ¶
func tracefree(p unsafe.Pointer, size uintptr)
func tracegc ¶
func tracegc()
func typeBitsBulkBarrier ¶
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr)
typeBitsBulkBarrier executes a write barrier for every pointer that would be copied from [src, src+size) to [dst, dst+size) by a memmove using the type bitmap to locate those pointer slots.
The type typ must correspond exactly to [src, src+size) and [dst, dst+size). dst, src, and size must be pointer-aligned. The type typ must have a plain bitmap, not a GC program. The only use of this function is in channel sends, and the 64 kB channel element limit takes care of this for us.
Must not be preempted because it typically runs right before memmove, and the GC must observe them as an atomic action.
Callers must perform cgo checks if writeBarrier.cgo.
func typedmemclr ¶
func typedmemclr(typ *_type, ptr unsafe.Pointer)
typedmemclr clears the typed memory at ptr with type typ. The memory at ptr must already be initialized (and hence in type-safe state). If the memory is being initialized for the first time, see memclrNoHeapPointers.
If the caller knows that typ has pointers, it can alternatively call memclrHasPointers.
func typedmemmove ¶
func typedmemmove(typ *_type, dst, src unsafe.Pointer)
typedmemmove copies a value of type t to dst from src. Must be nosplit, see #16026.
TODO: Perfect for go:nosplitrec since we can't have a safe point anywhere in the bulk barrier or memmove.
func typedslicecopy ¶
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int
func typehash ¶
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr
typehash computes the hash of the object of type t at address p. h is the seed. This function is seldom used. Most maps use for hashing either fixed functions (e.g. f32hash) or compiler-generated functions (e.g. for a type like struct { x, y string }). This implementation is slower but more general and is used for hashing interface types (called from interhash or nilinterhash, above) or for hashing in maps generated by reflect.MapOf (reflect_typehash, below). Note: this function must match the compiler generated functions exactly. See issue 37716.
func typelinksinit ¶
func typelinksinit()
typelinksinit scans the types from extra modules and builds the moduledata typemap used to de-duplicate type pointers.
func typesEqual ¶
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool
typesEqual reports whether two types are equal.
Everywhere in the runtime and reflect packages, it is assumed that there is exactly one *_type per Go type, so that pointer equality can be used to test if types are equal. There is one place that breaks this assumption: buildmode=shared. In this case a type can appear as two different pieces of memory. This is hidden from the runtime and reflect package by the per-module typemap built in typelinksinit. It uses typesEqual to map types from later modules back into earlier ones.
Only typelinksinit needs this function.
func uname ¶
func uname(utsname *new_utsname) int
func unblocksig ¶
func unblocksig(sig uint32)
unblocksig removes sig from the current thread's signal mask. This is nosplit and nowritebarrierrec because it is called from dieFromSignal, which can be called by sigfwdgo while running in the signal handler, on the signal stack, with no g available.
func unlock ¶
func unlock(l *mutex)
func unlock2 ¶
func unlock2(l *mutex)
func unlockOSThread ¶
func unlockOSThread()
func unlockWithRank ¶
func unlockWithRank(l *mutex)
func unlockextra ¶
func unlockextra(mp *m)
func unminit ¶
func unminit()
Called from dropm to undo the effect of an minit.
func unminitSignals ¶
func unminitSignals()
unminitSignals is called from dropm, via unminit, to undo the effect of calling minit on a non-Go thread.
func unwindm ¶
func unwindm(restore *bool)
func updateTimer0When ¶
func updateTimer0When(pp *p)
updateTimer0When sets the P's timer0When field. The caller must have locked the timers for pp.
func updatememstats ¶
func updatememstats()
func usleep ¶
func usleep(usec uint32)
func vdsoFindVersion ¶
func vdsoFindVersion(info *vdsoInfo, ver *vdsoVersionKey) int32
func vdsoInitFromSysinfoEhdr ¶
func vdsoInitFromSysinfoEhdr(info *vdsoInfo, hdr *elfEhdr)
func vdsoParseSymbols ¶
func vdsoParseSymbols(info *vdsoInfo, version int32)
func vdsoauxv ¶
func vdsoauxv(tag, val uintptr)
func verifyTimerHeap ¶
func verifyTimerHeap(pp *p)
verifyTimerHeap verifies that the timer heap is in a valid state. This is only for debugging, and is only called if verifyTimers is true. The caller must have locked the timers.
func wakeNetPoller ¶
func wakeNetPoller(when int64)
wakeNetPoller wakes up the thread sleeping in the network poller, if there is one, and if it isn't going to wake up anyhow before the when argument.
func wakeScavenger ¶
func wakeScavenger()
wakeScavenger immediately unparks the scavenger if necessary.
May run without a P, but it may allocate, so it must not be called on any allocation path.
mheap_.lock, scavenge.lock, and sched.lock must not be held.
func wakep ¶
func wakep()
Tries to add one more P to execute G's. Called when a G is made runnable (newproc, ready).
func walltime ¶
func walltime() (sec int64, nsec int32)
func walltime1 ¶
func walltime1() (sec int64, nsec int32)
func wantAsyncPreempt ¶
func wantAsyncPreempt(gp *g) bool
wantAsyncPreempt returns whether an asynchronous preemption is queued for gp.
func wbBufFlush ¶
func wbBufFlush(dst *uintptr, src uintptr)
wbBufFlush flushes the current P's write barrier buffer to the GC workbufs. It is passed the slot and value of the write barrier that caused the flush so that it can implement cgocheck.
This must not have write barriers because it is part of the write barrier implementation.
This and everything it calls must be nosplit because 1) the stack contains untyped slots from gcWriteBarrier and 2) there must not be a GC safe point between the write barrier test in the caller and flushing the buffer.
TODO: A "go:nosplitrec" annotation would be perfect for this.
func wbBufFlush1 ¶
func wbBufFlush1(_p_ *p)
wbBufFlush1 flushes p's write barrier buffer to the GC work queue.
This must not have write barriers because it is part of the write barrier implementation, so this may lead to infinite loops or buffer corruption.
This must be non-preemptible because it uses the P's workbuf.
func wbBufFlush1Debug ¶
func wbBufFlush1Debug(old, buf1, buf2 uintptr, start *uintptr, next uintptr)
wbBufFlush1Debug is a temporary function for debugging issue #27993. It exists solely to add some context to the traceback.
func wirep ¶
func wirep(_p_ *p)
wirep is the first step of acquirep, which actually associates the current M to _p_. This is broken out so we can disallow write barriers for this part, since we don't yet have a P.
func write ¶
func write(fd uintptr, p unsafe.Pointer, n int32) int32
write must be nosplit on Windows (see write1)
func write1 ¶
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write calls the write system call. It returns a non-negative number of bytes written or a negative errno value.
func writeErr ¶
func writeErr(b []byte)
func writeheapdump_m ¶
func writeheapdump_m(fd uintptr)
type BlockProfileRecord ¶ 1.1
BlockProfileRecord describes blocking events originating at a particular call sequence (stack trace).
type BlockProfileRecord struct { Count int64 Cycles int64 StackRecord }
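Typical use is through runtime.BlockProfile (or, more commonly, the runtime/pprof package). A small usage sketch follows; the helper name dumpBlockProfile is illustrative, not part of the package.

    import (
        "fmt"
        "runtime"
    )

    // dumpBlockProfile shows the retry-on-short-slice pattern used with the
    // profile snapshot functions.
    func dumpBlockProfile() {
        runtime.SetBlockProfileRate(1) // record every blocking event
        // ... run the workload of interest ...
        records := make([]runtime.BlockProfileRecord, 64)
        n, ok := runtime.BlockProfile(records)
        if !ok {
            // The slice was too small; n reports the required length.
            records = make([]runtime.BlockProfileRecord, n+10)
            n, _ = runtime.BlockProfile(records)
        }
        for _, r := range records[:n] {
            fmt.Printf("count=%d cycles=%d frames=%d\n", r.Count, r.Cycles, len(r.Stack()))
        }
    }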
type Error ¶
The Error interface identifies a run time error.
type Error interface {
    error

    // RuntimeError is a no-op function but
    // serves to distinguish types that are run time
    // errors from ordinary errors: a type is a
    // run time error if it has a RuntimeError method.
    RuntimeError()
}
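For example, a recovered panic caused by a run-time error can be distinguished from an ordinary panic value by asserting to runtime.Error. The helper below is hypothetical and only illustrates the pattern.

    import "runtime"

    // safeIndex converts a run-time error panic (such as an index out of
    // range) into an ordinary error value.
    func safeIndex(s []int, i int) (v int, err error) {
        defer func() {
            if r := recover(); r != nil {
                if re, ok := r.(runtime.Error); ok {
                    err = re // e.g. "runtime error: index out of range [5] with length 3"
                    return
                }
                panic(r) // not a run-time error; re-raise it
            }
        }()
        return s[i], nil
    }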
type Frame ¶ 1.7
Frame is the information returned by Frames for each call frame.
type Frame struct {
    // PC is the program counter for the location in this frame.
    // For a frame that calls another frame, this will be the
    // program counter of a call instruction. Because of inlining,
    // multiple frames may have the same PC value, but different
    // symbolic information.
    PC uintptr

    // Func is the Func value of this call frame. This may be nil
    // for non-Go code or fully inlined functions.
    Func *Func

    // Function is the package path-qualified function name of
    // this call frame. If non-empty, this string uniquely
    // identifies a single function in the program.
    // This may be the empty string if not known.
    // If Func is not nil then Function == Func.Name().
    Function string

    // File and Line are the file name and line number of the
    // location in this frame. For non-leaf frames, this will be
    // the location of a call. These may be the empty string and
    // zero, respectively, if not known.
    File string
    Line int

    // Entry point program counter for the function; may be zero
    // if not known. If Func is not nil then Entry == Func.Entry().
    Entry uintptr

    // The runtime's internal view of the function. This field
    // is set (funcInfo.valid() returns true) only for Go functions,
    // not for C functions.
    funcInfo funcInfo
}
func allFrames ¶
func allFrames(pcs []uintptr) []Frame
allFrames returns all of the Frames corresponding to pcs.
func expandCgoFrames ¶
func expandCgoFrames(pc uintptr) []Frame
expandCgoFrames expands frame information for pc, known to be a non-Go function, using the cgoSymbolizer hook. expandCgoFrames returns nil if pc could not be expanded.
type Frames ¶ 1.7
Frames may be used to get function/file/line information for a slice of PC values returned by Callers.
type Frames struct {
    // callers is a slice of PCs that have not yet been expanded to frames.
    callers []uintptr

    // frames is a slice of Frames that have yet to be returned.
    frames     []Frame
    frameStore [2]Frame
}
▹ Example
func CallersFrames ¶ 1.7
func CallersFrames(callers []uintptr) *Frames
CallersFrames takes a slice of PC values returned by Callers and prepares to return function/file/line information. Do not change the slice until you are done with the Frames.
func (*Frames) Next ¶ 1.7
func (ci *Frames) Next() (frame Frame, more bool)
Next returns frame information for the next caller. If more is false, there are no more callers (the Frame value is valid).
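A typical loop over Frames, similar in spirit to the package example referenced above. The helper name printStack is illustrative.

    import (
        "fmt"
        "runtime"
    )

    // printStack prints the calling goroutine's stack, one frame per entry.
    func printStack() {
        pc := make([]uintptr, 16)
        n := runtime.Callers(2, pc) // skip runtime.Callers and printStack itself
        frames := runtime.CallersFrames(pc[:n])
        for {
            frame, more := frames.Next()
            fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
            if !more {
                break
            }
        }
    }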
type Func ¶
A Func represents a Go function in the running binary.
type Func struct {
opaque struct{} // unexported field to disallow conversions
}
func FuncForPC ¶
func FuncForPC(pc uintptr) *Func
FuncForPC returns a *Func describing the function that contains the given program counter address, or else nil.
If pc represents multiple functions because of inlining, it returns the *Func describing the innermost function, but with an entry of the outermost function.
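A small usage sketch combining FuncForPC with runtime.Caller and the methods documented below (Name, Entry, FileLine). The helper name whereAmI is illustrative.

    import (
        "fmt"
        "runtime"
    )

    // whereAmI reports the function, entry address, file, and line of its own
    // call site.
    func whereAmI() {
        pc, _, _, ok := runtime.Caller(0)
        if !ok {
            return
        }
        f := runtime.FuncForPC(pc)
        if f == nil {
            return
        }
        file, line := f.FileLine(pc)
        fmt.Printf("%s (entry %#x) at %s:%d\n", f.Name(), f.Entry(), file, line)
    }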
func (*Func) Entry ¶
func (f *Func) Entry() uintptr
Entry returns the entry address of the function.
func (*Func) FileLine ¶
func (f *Func) FileLine(pc uintptr) (file string, line int)
FileLine returns the file name and line number of the source code corresponding to the program counter pc. The result will not be accurate if pc is not a program counter within f.
func (*Func) Name ¶
func (f *Func) Name() string
Name returns the name of the function.
func (*Func) funcInfo ¶
func (f *Func) funcInfo() funcInfo
func (*Func) raw ¶
func (f *Func) raw() *_func
type MemProfileRecord ¶
A MemProfileRecord describes the live objects allocated by a particular call sequence (stack trace).
type MemProfileRecord struct {
    AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
    AllocObjects, FreeObjects int64       // number of objects allocated, freed
    Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}
func (*MemProfileRecord) InUseBytes ¶
func (r *MemProfileRecord) InUseBytes() int64
InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (*MemProfileRecord) InUseObjects ¶
func (r *MemProfileRecord) InUseObjects() int64
InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (*MemProfileRecord) Stack ¶
func (r *MemProfileRecord) Stack() []uintptr
Stack returns the stack trace associated with the record, a prefix of r.Stack0.
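A usage sketch with runtime.MemProfile. Note that the profile only contains sampled allocations, controlled by runtime.MemProfileRate; the helper name liveHeapFromProfile is illustrative.

    import "runtime"

    // liveHeapFromProfile sums the in-use bytes recorded in the memory profile.
    func liveHeapFromProfile() int64 {
        records := make([]runtime.MemProfileRecord, 256)
        n, ok := runtime.MemProfile(records, false)
        if !ok {
            // Too small; n reports how many records exist. Retry with headroom.
            records = make([]runtime.MemProfileRecord, n+50)
            n, _ = runtime.MemProfile(records, false)
        }
        var inUse int64
        for i := range records[:n] {
            inUse += records[i].InUseBytes()
        }
        return inUse
    }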
type MemStats ¶
A MemStats records statistics about the memory allocator.
type MemStats struct {
    // Alloc is bytes of allocated heap objects.
    //
    // This is the same as HeapAlloc (see below).
    Alloc uint64

    // TotalAlloc is cumulative bytes allocated for heap objects.
    //
    // TotalAlloc increases as heap objects are allocated, but
    // unlike Alloc and HeapAlloc, it does not decrease when
    // objects are freed.
    TotalAlloc uint64

    // Sys is the total bytes of memory obtained from the OS.
    //
    // Sys is the sum of the XSys fields below. Sys measures the
    // virtual address space reserved by the Go runtime for the
    // heap, stacks, and other internal data structures. It's
    // likely that not all of the virtual address space is backed
    // by physical memory at any given moment, though in general
    // it all was at some point.
    Sys uint64

    // Lookups is the number of pointer lookups performed by the
    // runtime.
    //
    // This is primarily useful for debugging runtime internals.
    Lookups uint64

    // Mallocs is the cumulative count of heap objects allocated.
    // The number of live objects is Mallocs - Frees.
    Mallocs uint64

    // Frees is the cumulative count of heap objects freed.
    Frees uint64

    // HeapAlloc is bytes of allocated heap objects.
    //
    // "Allocated" heap objects include all reachable objects, as
    // well as unreachable objects that the garbage collector has
    // not yet freed. Specifically, HeapAlloc increases as heap
    // objects are allocated and decreases as the heap is swept
    // and unreachable objects are freed. Sweeping occurs
    // incrementally between GC cycles, so these two processes
    // occur simultaneously, and as a result HeapAlloc tends to
    // change smoothly (in contrast with the sawtooth that is
    // typical of stop-the-world garbage collectors).
    HeapAlloc uint64

    // HeapSys is bytes of heap memory obtained from the OS.
    //
    // HeapSys measures the amount of virtual address space
    // reserved for the heap. This includes virtual address space
    // that has been reserved but not yet used, which consumes no
    // physical memory, but tends to be small, as well as virtual
    // address space for which the physical memory has been
    // returned to the OS after it became unused (see HeapReleased
    // for a measure of the latter).
    //
    // HeapSys estimates the largest size the heap has had.
    HeapSys uint64

    // HeapIdle is bytes in idle (unused) spans.
    //
    // Idle spans have no objects in them. These spans could be
    // (and may already have been) returned to the OS, or they can
    // be reused for heap allocations, or they can be reused as
    // stack memory.
    //
    // HeapIdle minus HeapReleased estimates the amount of memory
    // that could be returned to the OS, but is being retained by
    // the runtime so it can grow the heap without requesting more
    // memory from the OS. If this difference is significantly
    // larger than the heap size, it indicates there was a recent
    // transient spike in live heap size.
    HeapIdle uint64

    // HeapInuse is bytes in in-use spans.
    //
    // In-use spans have at least one object in them. These spans
    // can only be used for other objects of roughly the same
    // size.
    //
    // HeapInuse minus HeapAlloc estimates the amount of memory
    // that has been dedicated to particular size classes, but is
    // not currently being used. This is an upper bound on
    // fragmentation, but in general this memory can be reused
    // efficiently.
    HeapInuse uint64

    // HeapReleased is bytes of physical memory returned to the OS.
    //
    // This counts heap memory from idle spans that was returned
    // to the OS and has not yet been reacquired for the heap.
    HeapReleased uint64

    // HeapObjects is the number of allocated heap objects.
    //
    // Like HeapAlloc, this increases as objects are allocated and
    // decreases as the heap is swept and unreachable objects are
    // freed.
    HeapObjects uint64

    // StackInuse is bytes in stack spans.
    //
    // In-use stack spans have at least one stack in them. These
    // spans can only be used for other stacks of the same size.
    //
    // There is no StackIdle because unused stack spans are
    // returned to the heap (and hence counted toward HeapIdle).
    StackInuse uint64

    // StackSys is bytes of stack memory obtained from the OS.
    //
    // StackSys is StackInuse, plus any memory obtained directly
    // from the OS for OS thread stacks (which should be minimal).
    StackSys uint64

    // MSpanInuse is bytes of allocated mspan structures.
    MSpanInuse uint64

    // MSpanSys is bytes of memory obtained from the OS for mspan
    // structures.
    MSpanSys uint64

    // MCacheInuse is bytes of allocated mcache structures.
    MCacheInuse uint64

    // MCacheSys is bytes of memory obtained from the OS for
    // mcache structures.
    MCacheSys uint64

    // BuckHashSys is bytes of memory in profiling bucket hash tables.
    BuckHashSys uint64

    // GCSys is bytes of memory in garbage collection metadata.
    GCSys uint64 // Go 1.2

    // OtherSys is bytes of memory in miscellaneous off-heap
    // runtime allocations.
    OtherSys uint64 // Go 1.2

    // NextGC is the target heap size of the next GC cycle.
    //
    // The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
    // At the end of each GC cycle, the target for the next cycle
    // is computed based on the amount of reachable data and the
    // value of GOGC.
    NextGC uint64

    // LastGC is the time the last garbage collection finished, as
    // nanoseconds since 1970 (the UNIX epoch).
    LastGC uint64

    // PauseTotalNs is the cumulative nanoseconds in GC
    // stop-the-world pauses since the program started.
    //
    // During a stop-the-world pause, all goroutines are paused
    // and only the garbage collector can run.
    PauseTotalNs uint64

    // PauseNs is a circular buffer of recent GC stop-the-world
    // pause times in nanoseconds.
    //
    // The most recent pause is at PauseNs[(NumGC+255)%256]. In
    // general, PauseNs[N%256] records the time paused in the most
    // recent N%256th GC cycle. There may be multiple pauses per
    // GC cycle; this is the sum of all pauses during a cycle.
    PauseNs [256]uint64

    // PauseEnd is a circular buffer of recent GC pause end times,
    // as nanoseconds since 1970 (the UNIX epoch).
    //
    // This buffer is filled the same way as PauseNs. There may be
    // multiple pauses per GC cycle; this records the end of the
    // last pause in a cycle.
    PauseEnd [256]uint64 // Go 1.4

    // NumGC is the number of completed GC cycles.
    NumGC uint32

    // NumForcedGC is the number of GC cycles that were forced by
    // the application calling the GC function.
    NumForcedGC uint32 // Go 1.8

    // GCCPUFraction is the fraction of this program's available
    // CPU time used by the GC since the program started.
    //
    // GCCPUFraction is expressed as a number between 0 and 1,
    // where 0 means GC has consumed none of this program's CPU. A
    // program's available CPU time is defined as the integral of
    // GOMAXPROCS since the program started. That is, if
    // GOMAXPROCS is 2 and a program has been running for 10
    // seconds, its "available CPU" is 20 seconds. GCCPUFraction
    // does not include CPU time used for write barrier activity.
    //
    // This is the same as the fraction of CPU reported by
    // GODEBUG=gctrace=1.
    GCCPUFraction float64 // Go 1.5

    // EnableGC indicates that GC is enabled. It is always true,
    // even if GOGC=off.
    EnableGC bool

    // DebugGC is currently unused.
    DebugGC bool

    // BySize reports per-size class allocation statistics.
    //
    // BySize[N] gives statistics for allocations of size S where
    // BySize[N-1].Size < S ≤ BySize[N].Size.
    //
    // This does not report allocations larger than BySize[60].Size.
    BySize [61]struct {
        // Size is the maximum byte size of an object in this
        // size class.
        Size uint32

        // Mallocs is the cumulative count of heap objects
        // allocated in this size class. The cumulative bytes
        // of allocation is Size*Mallocs. The number of live
        // objects in this size class is Mallocs - Frees.
        Mallocs uint64

        // Frees is the cumulative count of heap objects freed
        // in this size class.
        Frees uint64
    }
}
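MemStats is read with runtime.ReadMemStats. A minimal usage example:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        var m runtime.MemStats
        runtime.ReadMemStats(&m)
        fmt.Printf("heap alloc = %d B, heap sys = %d B, completed GCs = %d\n",
            m.HeapAlloc, m.HeapSys, m.NumGC)
    }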
type StackRecord ¶
A StackRecord describes a single execution stack.
type StackRecord struct {
    Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
func (*StackRecord) Stack ¶
func (r *StackRecord) Stack() []uintptr
Stack returns the stack trace associated with the record, a prefix of r.Stack0.
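StackRecord is used by snapshot functions such as runtime.GoroutineProfile and runtime.ThreadCreateProfile. A small usage sketch; the helper name goroutineTops is illustrative.

    import (
        "fmt"
        "runtime"
    )

    // goroutineTops prints the innermost frame of every goroutine captured by
    // runtime.GoroutineProfile.
    func goroutineTops() {
        records := make([]runtime.StackRecord, runtime.NumGoroutine()+10)
        n, ok := runtime.GoroutineProfile(records)
        if !ok {
            return // slice was too small; a real caller would retry with length n
        }
        for i := range records[:n] {
            frames := runtime.CallersFrames(records[i].Stack())
            frame, _ := frames.Next()
            fmt.Println("goroutine running in:", frame.Function)
        }
    }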
type TypeAssertionError ¶
A TypeAssertionError explains a failed type assertion.
type TypeAssertionError struct {
    _interface    *_type
    concrete      *_type
    asserted      *_type
    missingMethod string // one method needed by Interface, missing from Concrete
}
func (*TypeAssertionError) Error ¶
func (e *TypeAssertionError) Error() string
func (*TypeAssertionError) RuntimeError ¶
func (*TypeAssertionError) RuntimeError()
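For example, a failed single-value type assertion panics with a *TypeAssertionError, which can be recovered and inspected. The exact message text may vary by release.

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        defer func() {
            if r := recover(); r != nil {
                if tae, ok := r.(*runtime.TypeAssertionError); ok {
                    fmt.Println("assertion failed:", tae.Error())
                }
            }
        }()
        var i interface{} = "hello"
        _ = i.(int) // panics; the panic value is a *runtime.TypeAssertionError
    }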
type _defer ¶
A _defer holds an entry on the list of deferred calls. If you add a field here, add code to clear it in freedefer and deferProcStack. This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct and cmd/compile/internal/gc/ssa.go:(*state).call. Some defers will be allocated on the stack and some on the heap. All defers are logically part of the stack, so write barriers to initialize them are not required. All defers must be manually scanned, and for heap defers, marked.
type _defer struct { siz int32 // includes both arguments and results started bool heap bool // openDefer indicates that this _defer is for a frame with open-coded // defers. We have only one defer record for the entire frame (which may // currently have 0, 1, or more defers active). openDefer bool sp uintptr // sp at time of defer pc uintptr // pc at time of defer fn *funcval // can be nil for open-coded defers _panic *_panic // panic that is running defer link *_defer // If openDefer is true, the fields below record values about the stack // frame and associated function that has the open-coded defer(s). sp // above will be the sp for the frame, and pc will be address of the // deferreturn call in the function. fd unsafe.Pointer // funcdata for the function associated with the frame varp uintptr // value of varp for the stack frame // framepc is the current pc associated with the stack frame. Together, // with sp above (which is the sp associated with the stack frame), // framepc/sp can be used as pc/sp pair to continue a stack trace via // gentraceback(). framepc uintptr }
func newdefer ¶
func newdefer(siz int32) *_defer
Allocate a Defer, usually using per-P pool. Each defer must be released with freedefer. The defer is not added to any defer chain yet.
This must not grow the stack because there may be a frame without stack map information when this is called.
type _func ¶
Layout of in-memory per-function information prepared by the linker. See https://golang.org/s/go12symtab. Keep in sync with the linker (../cmd/link/internal/ld/pcln.go:/pclntab), with package debug/gosym, and with symtab.go in package runtime.
type _func struct { entry uintptr // start pc nameoff int32 // function name args int32 // in/out args size deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. pcsp int32 pcfile int32 pcln int32 npcdata int32 funcID funcID // set for certain special runtime functions _ [2]int8 // unused nfuncdata uint8 // must be last }
type _panic ¶
A _panic holds information about an active panic.
This is marked go:notinheap because _panic values must only ever live on the stack.
The argp and link fields are stack pointers, but don't need special handling during stack growth: because they are pointer-typed and _panic values only live on the stack, regular stack pointer adjustment takes care of them.
type _panic struct { argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink arg interface{} // argument to panic link *_panic // link to earlier panic pc uintptr // where to return to in runtime if this panic is bypassed sp unsafe.Pointer // where to return to in runtime if this panic is bypassed recovered bool // whether this panic is over aborted bool // the panic was aborted goexit bool }
type _type ¶
Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize, ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and ../reflect/type.go:/^type.rtype. ../internal/reflectlite/type.go:/^type.rtype.
type _type struct { size uintptr ptrdata uintptr // size of memory prefix holding all pointers hash uint32 tflag tflag align uint8 fieldAlign uint8 kind uint8 // function for comparing objects of this type // (ptr to object A, ptr to object B) -> ==? equal func(unsafe.Pointer, unsafe.Pointer) bool // gcdata stores the GC type data for the garbage collector. // If the KindGCProg bit is set in kind, gcdata is a GC program. // Otherwise it is a ptrmask bitmap. See mbitmap.go for details. gcdata *byte str nameOff ptrToThis typeOff }
var ( pdEface interface{} = (*pollDesc)(nil) pdType *_type = efaceOf(&pdEface)._type )
var deferType *_type // type of _defer struct
func resolveTypeOff ¶
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type
func (*_type) name ¶
func (t *_type) name() string
func (*_type) nameOff ¶
func (t *_type) nameOff(off nameOff) name
func (*_type) pkgpath ¶
func (t *_type) pkgpath() string
pkgpath returns the path of the package where t was defined, if available. This is not the same as the reflect package's PkgPath method, in that it returns the package path for struct and interface types, not just named types.
func (*_type) string ¶
func (t *_type) string() string
func (*_type) textOff ¶
func (t *_type) textOff(off textOff) unsafe.Pointer
func (*_type) typeOff ¶
func (t *_type) typeOff(off typeOff) *_type
func (*_type) uncommon ¶
func (t *_type) uncommon() *uncommontype
type _typePair ¶
type _typePair struct { t1 *_type t2 *_type }
type addrRange ¶
addrRange represents a region of address space.
An addrRange must never span a gap in the address space.
type addrRange struct { // base and limit together represent the region of address space // [base, limit). That is, base is inclusive, limit is exclusive. // These are address over an offset view of the address space on // platforms with a segmented address space, that is, on platforms // where arenaBaseOffset != 0. base, limit offAddr }
func makeAddrRange ¶
func makeAddrRange(base, limit uintptr) addrRange
makeAddrRange creates a new address range from two virtual addresses.
Throws if the base and limit are not in the same memory segment.
func (addrRange) contains ¶
func (a addrRange) contains(addr uintptr) bool
contains returns whether or not the range contains a given address.
func (addrRange) removeGreaterEqual ¶
func (a addrRange) removeGreaterEqual(addr uintptr) addrRange
removeGreaterEqual removes all addresses in a greater than or equal to addr and returns the new range.
func (addrRange) size ¶
func (a addrRange) size() uintptr
size returns the size of the range represented in bytes.
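A simplified model of these two operations, using raw uintptrs rather than the runtime's offAddr type (that is, assuming a flat, non-segmented address space):

    // simpleRange models [base, limit) over a flat address space.
    type simpleRange struct {
        base, limit uintptr
    }

    // contains reports whether addr lies in the half-open interval.
    func (r simpleRange) contains(addr uintptr) bool {
        return r.base <= addr && addr < r.limit
    }

    // size returns the number of bytes in the range.
    func (r simpleRange) size() uintptr {
        return r.limit - r.base
    }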
func (addrRange) subtract ¶
func (a addrRange) subtract(b addrRange) addrRange
subtract removes any overlap with b from a and returns the new range. subtract assumes that a and b either don't overlap at all, only overlap on one side, or are equal. If b is strictly contained in a, thus forcing a split, it will throw.
type addrRanges ¶
addrRanges is a data structure holding a collection of ranges of address space.
The ranges are coalesced eagerly to reduce the number of ranges it holds.
The slice backing store for this field is persistentalloc'd and thus there is no way to free it.
addrRanges is not thread-safe.
type addrRanges struct { // ranges is a slice of ranges sorted by base. ranges []addrRange // totalBytes is the total amount of address space in bytes counted by // this addrRanges. totalBytes uintptr // sysStat is the stat to track allocations by this type sysStat *uint64 }
func (*addrRanges) add ¶
func (a *addrRanges) add(r addrRange)
add inserts a new address range to a.
r must not overlap with any address range in a.
func (*addrRanges) cloneInto ¶
func (a *addrRanges) cloneInto(b *addrRanges)
cloneInto makes a deep clone of a's state into b, re-using b's ranges if able.
func (*addrRanges) contains ¶
func (a *addrRanges) contains(addr uintptr) bool
contains returns true if a covers the address addr.
func (*addrRanges) findAddrGreaterEqual ¶
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool)
findAddrGreaterEqual returns the smallest address represented by a that is >= addr. Thus, if the address is represented by a, then it returns addr. The second return value indicates whether such an address exists for addr in a. That is, if addr is larger than any address known to a, the second return value will be false.
func (*addrRanges) findSucc ¶
func (a *addrRanges) findSucc(addr uintptr) int
findSucc returns the first index in a such that addr is less than the base of the addrRange at that index.
func (*addrRanges) init ¶
func (a *addrRanges) init(sysStat *uint64)
func (*addrRanges) removeGreaterEqual ¶
func (a *addrRanges) removeGreaterEqual(addr uintptr)
removeGreaterEqual removes the ranges of a which are above addr, and additionally splits any range containing addr.
func (*addrRanges) removeLast ¶
func (a *addrRanges) removeLast(nBytes uintptr) addrRange
removeLast removes and returns the highest-addressed contiguous range of a, or the last nBytes of that range, whichever is smaller. If a is empty, it returns an empty range.
type adjustinfo ¶
type adjustinfo struct { old stack delta uintptr // ptr distance from old to new stack (newbase - oldbase) cache pcvalueCache // sghi is the highest sudog.elem on the stack. sghi uintptr }
type ancestorInfo ¶
ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct { pcs []uintptr // pcs from the stack of this goroutine goid int64 // goroutine id of this goroutine; original goroutine possibly dead gopc uintptr // pc of go statement that created this goroutine }
type arenaHint ¶
arenaHint is a hint for where to grow the heap arenas. See mheap_.arenaHints.
type arenaHint struct { addr uintptr down bool next *arenaHint }
type arenaIdx ¶
type arenaIdx uint
func arenaIndex ¶
func arenaIndex(p uintptr) arenaIdx
arenaIndex returns the index into mheap_.arenas of the arena containing metadata for p. This index combines an index into the L1 map and an index into the L2 map and should be used as mheap_.arenas[ai.l1()][ai.l2()].
If p is outside the range of valid heap addresses, either l1() or l2() will be out of bounds.
It is nosplit because it's called by spanOf and several other nosplit functions.
func (arenaIdx) l1 ¶
func (i arenaIdx) l1() uint
func (arenaIdx) l2 ¶
func (i arenaIdx) l2() uint
type arraytype ¶
type arraytype struct { typ _type elem *_type slice *_type len uintptr }
type bitvector ¶
Information from the compiler about the layout of stack frames. Note: this type must agree with reflect.bitVector.
type bitvector struct { n int32 // # of bits bytedata *uint8 }
func makeheapobjbv ¶
func makeheapobjbv(p uintptr, size uintptr) bitvector
func progToPointerMask ¶
func progToPointerMask(prog *byte, size uintptr) bitvector
progToPointerMask returns the 1-bit pointer mask output by the GC program prog. size is the size of the region described by prog, in bytes. The resulting bitvector will have no more than size/sys.PtrSize bits.
func stackmapdata ¶
func stackmapdata(stkmap *stackmap, n int32) bitvector
func (*bitvector) ptrbit ¶
func (bv *bitvector) ptrbit(i uintptr) uint8
ptrbit returns the i'th bit in bv. ptrbit is less efficient than iterating directly over bitvector bits, and should only be used in non-performance-critical code. See adjustpointers for an example of a high-efficiency walk of a bitvector.
type blockRecord ¶
A blockRecord is the bucket data for a bucket of type blockProfile, which is used in blocking and mutex profiles.
type blockRecord struct { count int64 cycles int64 }
type bmap ¶
A bucket for a Go map.
type bmap struct { // tophash generally contains the top byte of the hash value // for each key in this bucket. If tophash[0] < minTopHash, // tophash[0] is a bucket evacuation state instead. tophash [bucketCnt]uint8 }
func makeBucketArray ¶
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap)
makeBucketArray initializes a backing array for map buckets. 1<<b is the minimum number of buckets to allocate. dirtyalloc should either be nil or a bucket array previously allocated by makeBucketArray with the same t and b parameters. If dirtyalloc is nil a new backing array will be alloced and otherwise dirtyalloc will be cleared and reused as backing array.
func (*bmap) keys ¶
func (b *bmap) keys() unsafe.Pointer
func (*bmap) overflow ¶
func (b *bmap) overflow(t *maptype) *bmap
func (*bmap) setoverflow ¶
func (b *bmap) setoverflow(t *maptype, ovf *bmap)
type boundsError ¶
A boundsError represents an indexing or slicing operation gone wrong.
type boundsError struct { x int64 y int // Values in an index or slice expression can be signed or unsigned. // That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1. // Instead, we keep track of whether x should be interpreted as signed or unsigned. // y is known to be nonnegative and to fit in an int. signed bool code boundsErrorCode }
func (boundsError) Error ¶
func (e boundsError) Error() string
func (boundsError) RuntimeError ¶
func (e boundsError) RuntimeError()
type boundsErrorCode ¶
type boundsErrorCode uint8
const (
    boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed
    boundsSliceAlen                    // s[?:x], 0 <= x <= len(s) failed
    boundsSliceAcap                    // s[?:x], 0 <= x <= cap(s) failed
    boundsSliceB                       // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
    boundsSlice3Alen                   // s[?:?:x], 0 <= x <= len(s) failed
    boundsSlice3Acap                   // s[?:?:x], 0 <= x <= cap(s) failed
    boundsSlice3B                      // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
    boundsSlice3C                      // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
)
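These codes surface to user code as a panic whose value implements runtime.Error. A small example; the exact message text may vary by Go version:

package main

import (
    "fmt"
    "runtime"
)

func main() {
    defer func() {
        if r := recover(); r != nil {
            // A failed index or slice expression panics with a value that
            // implements runtime.Error (the unexported boundsError).
            if err, ok := r.(runtime.Error); ok {
                fmt.Println("recovered:", err)
            }
        }
    }()

    s := []int{1, 2, 3}
    i := 5
    _ = s[i] // boundsIndex: 0 <= i < len(s) fails, e.g. "index out of range [5] with length 3"
}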
type bucket ¶
A bucket holds per-call-stack profiling information. The representation is a bit sleazy, inherited from C. This struct defines the bucket header. It is followed in memory by the stack words and then the actual record data, either a memRecord or a blockRecord.
Per-call-stack profiling information. Lookup is done by hashing the call stack into a linked-list hash table.
No heap pointers.
type bucket struct {
    next    *bucket
    allnext *bucket
    typ     bucketType // memBucket or blockBucket (includes mutexProfile)
    hash    uintptr
    size    uintptr
    nstk    uintptr
}
func newBucket ¶
func newBucket(typ bucketType, nstk int) *bucket
newBucket allocates a bucket with the given type and number of stack entries.
func stkbucket ¶
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
stkbucket returns the bucket for stk[0:nstk], allocating a new bucket if needed.
func (*bucket) bp ¶
func (b *bucket) bp() *blockRecord
bp returns the blockRecord associated with the blockProfile bucket b.
func (*bucket) mp ¶
func (b *bucket) mp() *memRecord
mp returns the memRecord associated with the memProfile bucket b.
func (*bucket) stk ¶
func (b *bucket) stk() []uintptr
stk returns the slice in b holding the stack.
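A sketch of the header-plus-trailing-payload layout that stk relies on. A byte slice stands in for the runtime's persistentalloc and the bucket header is trimmed down to one field; only the pointer arithmetic matches the real scheme.

package main

import (
    "fmt"
    "unsafe"
)

const maxStack = 32 // assumed cap on recorded stack depth, as in the profiler

// bucket is a trimmed-down header; the real one also carries hash, size,
// and list links, and is followed in memory by the record data as well.
type bucket struct {
    nstk uintptr
}

// newBucket allocates the header and nstk trailing stack slots in one block.
func newBucket(nstk int) *bucket {
    size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
    mem := make([]byte, size) // stand-in for the runtime's persistentalloc
    b := (*bucket)(unsafe.Pointer(&mem[0]))
    b.nstk = uintptr(nstk)
    return b
}

// stk returns a slice over the words that sit just past the header.
func (b *bucket) stk() []uintptr {
    p := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + unsafe.Sizeof(*b))
    return (*[maxStack]uintptr)(p)[:b.nstk:b.nstk]
}

func main() {
    b := newBucket(3)
    copy(b.stk(), []uintptr{0x401000, 0x402000, 0x403000})
    fmt.Printf("%#x\n", b.stk())
}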
type bucketType ¶
type bucketType int
const (
    // profile types
    memProfile bucketType = 1 + iota
    blockProfile
    mutexProfile

    // size of bucket hash table
    buckHashSize = 179999

    // max depth of stack to record in bucket
    maxStack = 32
)
type cgoCallers ¶
Addresses collected in a cgo backtrace when crashing. Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr
If the signal handler receives a SIGPROF signal on a non-Go thread, it tries to collect a traceback into sigprofCallers. sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
var sigprofCallers cgoCallers
type cgoContextArg ¶
cgoContextArg is the type passed to the context function.
type cgoContextArg struct { context uintptr }
type cgoSymbolizerArg ¶
cgoSymbolizerArg is the type passed to cgoSymbolizer.
type cgoSymbolizerArg struct { pc uintptr file *byte lineno uintptr funcName *byte entry uintptr more uintptr data uintptr }
type cgoTracebackArg ¶
cgoTracebackArg is the type passed to cgoTraceback.
type cgoTracebackArg struct { context uintptr sigContext uintptr buf *uintptr max uintptr }
type cgothreadstart ¶
type cgothreadstart struct { g guintptr tls *uint64 fn unsafe.Pointer }
type chantype ¶
type chantype struct { typ _type elem *_type dir uintptr }
type childInfo ¶
type childInfo struct {
    // Information passed up from the callee frame about
    // the layout of the outargs region.
    argoff uintptr   // where the arguments start in the frame
    arglen uintptr   // size of args region
    args   bitvector // if args.n >= 0, pointer map of args region
    sp     *uint8    // callee sp
    depth  uintptr   // depth in call stack (0 == most recent)
}
type chunkIdx ¶
Global chunk index.
Represents an index into the leaf level of the radix tree. Similar to arenaIndex, except instead of arenas, it divides the address space into chunks.
type chunkIdx uint
func chunkIndex ¶
func chunkIndex(p uintptr) chunkIdx
chunkIndex returns the global index of the palloc chunk containing the pointer p.
func (chunkIdx) l1 ¶
func (i chunkIdx) l1() uint
l1 returns the index into the first level of (*pageAlloc).chunks.
func (chunkIdx) l2 ¶
func (i chunkIdx) l2() uint
l2 returns the index into the second level of (*pageAlloc).chunks.
type cpuProfile ¶
type cpuProfile struct {
    lock mutex
    on   bool     // profiling is on
    log  *profBuf // profile events written here

    // extra holds extra stacks accumulated in addNonGo
    // corresponding to profiling signals arriving on
    // non-Go-created threads. Those stacks are written
    // to log the next time a normal Go thread gets the
    // signal handler.
    // Assuming the stacks are 2 words each (we don't get
    // a full traceback from those threads), plus one word
    // size for framing, 100 Hz profiling would generate
    // 300 words per second.
    // Hopefully a normal Go thread will get the profiling
    // signal at least once every few seconds.
    extra     [1000]uintptr
    numExtra  int
    lostExtra uint64 // count of frames lost because extra is full

    lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily
}
var cpuprof cpuProfile
func (*cpuProfile) add ¶
func (p *cpuProfile) add(gp *g, stk []uintptr)
add adds the stack trace to the profile. It is called from signal handlers and other limited environments and cannot allocate memory or acquire locks that might be held at the time of the signal, nor can it use substantial amounts of stack.
func (*cpuProfile) addExtra ¶
func (p *cpuProfile) addExtra()
addExtra adds the "extra" profiling events, queued by addNonGo, to the profile log. addExtra is called either from a signal handler on a Go thread or from an ordinary goroutine; either way it can use stack and has a g. The world may be stopped, though.
func (*cpuProfile) addNonGo ¶
func (p *cpuProfile) addNonGo(stk []uintptr)
addNonGo adds the non-Go stack trace to the profile. It is called from a non-Go thread, so we cannot use much stack at all, nor do anything that needs a g or an m. In particular, we can't call cpuprof.log.write. Instead, we copy the stack into cpuprof.extra, which will be drained the next time a Go thread gets the signal handling event.
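The buffer-and-drain scheme described above, reduced to a self-contained sketch: each queued entry is a length word followed by that many PCs, appended without allocation and replayed later on a Go thread. The field names follow cpuProfile, but the framing here is simplified and assumed.

package main

import "fmt"

// extraBuf sketches the cpuprof.extra scheme: each queued stack is stored
// as a length word followed by that many PCs, so entries can be replayed
// in order when a Go thread is available to drain them.
type extraBuf struct {
    extra     [1000]uintptr
    numExtra  int
    lostExtra uint64 // entries dropped because the buffer was full
}

// addNonGo queues a stack without allocating; it must fit or be dropped.
func (p *extraBuf) addNonGo(stk []uintptr) {
    if p.numExtra+1+len(stk) <= len(p.extra) {
        i := p.numExtra
        p.extra[i] = uintptr(len(stk))
        copy(p.extra[i+1:], stk)
        p.numExtra += 1 + len(stk)
    } else {
        p.lostExtra++
    }
}

// addExtra drains the queued entries, handing each stack to emit.
func (p *extraBuf) addExtra(emit func([]uintptr)) {
    for i := 0; i < p.numExtra; {
        n := int(p.extra[i])
        emit(p.extra[i+1 : i+1+n])
        i += 1 + n
    }
    p.numExtra = 0
}

func main() {
    var p extraBuf
    p.addNonGo([]uintptr{0x1001, 0x1002})
    p.addNonGo([]uintptr{0x2001})
    p.addExtra(func(stk []uintptr) { fmt.Printf("stack: %#x\n", stk) })
    fmt.Println("lost:", p.lostExtra)
}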
type dbgVar ¶
type dbgVar struct { name string value *int32 }
type debugLogBuf ¶
type debugLogBuf [debugLogBytes]byte
type debugLogReader ¶
type debugLogReader struct {
    data *debugLogBuf

    // begin and end are the positions in the log of the beginning
    // and end of the log data, modulo len(data).
    begin, end uint64

    // tick and nano are the current time base at begin.
    tick, nano uint64
}
func (*debugLogReader) header ¶
func (r *debugLogReader) header() (end, tick, nano uint64, p int)
func (*debugLogReader) peek ¶
func (r *debugLogReader) peek() (tick uint64)
func (*debugLogReader) printVal ¶
func (r *debugLogReader) printVal() bool
func (*debugLogReader) readUint16LEAt ¶
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16
func (*debugLogReader) readUint64LEAt ¶
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64
func (*debugLogReader) skip ¶
func (r *debugLogReader) skip() uint64
func (*debugLogReader) uvarint ¶
func (r *debugLogReader) uvarint() uint64
func (*debugLogReader) varint ¶
func (r *debugLogReader) varint() int64
type debugLogWriter ¶
A debugLogWriter is a ring buffer of binary debug log records.
A log record consists of a 2-byte framing header and a sequence of fields. The framing header gives the size of the record as a little-endian 16-bit value. Each field starts with a byte indicating its type, followed by type-specific data. If the size in the framing header is 0, it's a sync record consisting of two little-endian 64-bit values giving a new time base.
Because this is a ring buffer, new records will eventually overwrite old records. Hence, it maintains a reader that consumes the log as it gets overwritten. That reader state is where an actual log reader would start.
type debugLogWriter struct {
    write uint64
    data  debugLogBuf

    // tick and nano are the time bases from the most recently
    // written sync record.
    tick, nano uint64

    // r is a reader that consumes records as they get overwritten
    // by the writer. It also acts as the initial reader state
    // when printing the log.
    r debugLogReader

    // buf is a scratch buffer for encoding. This is here to
    // reduce stack usage.
    buf [10]byte
}
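A sketch of the record framing described above: a 2-byte little-endian size header, with size 0 marking a sync record that carries two 64-bit time-base values, and all positions taken modulo the buffer length. The ring type and its 64-byte buffer are stand-ins, not the real debugLogBuf.

package main

import "fmt"

const debugLogHeaderSize = 2 // bytes of framing per record

// ring is a tiny stand-in for debugLogBuf with modulo indexing.
type ring struct {
    data [64]byte
}

func (r *ring) writeByteAt(pos uint64, b byte) { r.data[pos%uint64(len(r.data))] = b }
func (r *ring) readByteAt(pos uint64) byte     { return r.data[pos%uint64(len(r.data))] }

// writeFrameAt stores the record size as a little-endian 16-bit value.
func (r *ring) writeFrameAt(pos, size uint64) {
    r.writeByteAt(pos, byte(size))
    r.writeByteAt(pos+1, byte(size>>8))
}

// readUint16LEAt decodes a framing header.
func (r *ring) readUint16LEAt(pos uint64) uint16 {
    return uint16(r.readByteAt(pos)) | uint16(r.readByteAt(pos+1))<<8
}

// writeUint64LE stores one 64-bit sync value, least-significant byte first.
func (r *ring) writeUint64LE(pos, x uint64) {
    for i := 0; i < 8; i++ {
        r.writeByteAt(pos+uint64(i), byte(x>>(8*i)))
    }
}

func main() {
    var r ring
    // A sync record: size 0 in the header, then the two time-base values.
    r.writeFrameAt(0, 0)
    r.writeUint64LE(debugLogHeaderSize, 12345)   // tick
    r.writeUint64LE(debugLogHeaderSize+8, 67890) // nano
    fmt.Println("frame size:", r.readUint16LEAt(0)) // 0 => sync record
}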
func (*debugLogWriter) byte ¶
func (l *debugLogWriter) byte(x byte)
func (*debugLogWriter) bytes ¶
func (l *debugLogWriter) bytes(x []byte)
func (*debugLogWriter) ensure ¶
func (l *debugLogWriter) ensure(n uint64)
func (*debugLogWriter) uvarint ¶
func (l *debugLogWriter) uvarint(u uint64)
func (*debugLogWriter) varint ¶
func (l *debugLogWriter) varint(x int64)
func (*debugLogWriter) writeFrameAt ¶
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool
func (*debugLogWriter) writeSync ¶
func (l *debugLogWriter) writeSync(tick, nano uint64)
func (*debugLogWriter) writeUint64LE ¶
func (l *debugLogWriter) writeUint64LE(x uint64)
type divMagic ¶
type divMagic struct { shift uint8 shift2 uint8 mul uint16 baseMask uint16 }
type dlogPerM ¶
type dlogPerM struct{}
type dlogger ¶
A dlogger writes to the debug log.
To obtain a dlogger, call dlog(). When done with the dlogger, call end().
type dlogger struct {
    w debugLogWriter

    // allLink is the next dlogger in the allDloggers list.
    allLink *dlogger

    // owned indicates that this dlogger is owned by an M. This is
    // accessed atomically.
    owned uint32
}
allDloggers is a list of all dloggers, linked through dlogger.allLink. It is accessed atomically and is prepend-only, so it doesn't need to protect against ABA races.
var allDloggers *dlogger
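A sketch of the prepend-only pattern: because nodes are only ever pushed at the head and never removed, a compare-and-swap loop on the head pointer suffices and ABA cannot arise. The node type and ids here are illustrative.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "unsafe"
)

// node stands in for dlogger; allLink threads the global list.
type node struct {
    id      int
    allLink *node
}

var allNodes unsafe.Pointer // *node head; accessed only atomically

// prepend pushes n at the head with a CAS loop. Since nodes are never
// removed, a racing pusher that observes a stale head simply retries;
// there is no ABA hazard to defend against.
func prepend(n *node) {
    for {
        head := atomic.LoadPointer(&allNodes)
        n.allLink = (*node)(head)
        if atomic.CompareAndSwapPointer(&allNodes, head, unsafe.Pointer(n)) {
            return
        }
    }
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) { defer wg.Done(); prepend(&node{id: i}) }(i)
    }
    wg.Wait()
    for n := (*node)(atomic.LoadPointer(&allNodes)); n != nil; n = n.allLink {
        fmt.Println("node", n.id)
    }
}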
func dlog ¶
func dlog() *dlogger
dlog returns a debug logger. The caller can use methods on the returned logger to add values, which will be space-separated in the final output, much like println. The caller must call end() to finish the message.
dlog can be used from highly-constrained corners of the runtime: it is safe to use in the signal handler, from within the write barrier, from within the stack implementation, and in places that must be recursively nosplit.
This will be compiled away if built without the debuglog build tag. However, argument construction may not be. If any of the arguments are not literals or trivial expressions, consider protecting the call with "if dlogEnabled".
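The calling convention reads roughly as follows. The logger type below is only a stand-in that mimics the chained shape of dlogger (the real one writes binary records to a per-M ring buffer and is compiled in only with the debuglog build tag); the guard mirrors the "if dlogEnabled" advice above, and the logged values are made up.

package main

import "fmt"

const dlogEnabled = true // stand-in for the debuglog build-tag switch

// logger mimics the call shape of the runtime's dlogger: chain value
// methods, then finish the record with end(). This is only a stand-in;
// it prints to stdout instead of encoding binary records.
type logger struct{ parts []interface{} }

func dlog() *logger                    { return &logger{} }
func (l *logger) s(x string) *logger   { l.parts = append(l.parts, x); return l }
func (l *logger) i(x int) *logger      { l.parts = append(l.parts, x); return l }
func (l *logger) hex(x uint64) *logger { l.parts = append(l.parts, fmt.Sprintf("%#x", x)); return l }
func (l *logger) end()                 { fmt.Println(l.parts...) }

func main() {
    if dlogEnabled { // guard keeps argument construction cheap when disabled
        dlog().s("span alloc").hex(0xc000100000).i(4).end()
    }
}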
func getCachedDlogger ¶
func getCachedDlogger() *dlogger
func (*dlogger) b ¶
func (l *dlogger) b(x bool) *dlogger
func (*dlogger) end ¶
func (l *dlogger) end()
func (*dlogger) hex ¶
func (l *dlogger) hex(x uint64) *dlogger
func (*dlogger) i ¶
func (l *dlogger) i(x int) *dlogger
func (*dlogger) i16 ¶
func (l *dlogger) i16(x int16) *dlogger
func (*dlogger) i32 ¶
func (l *dlogger) i32(x int32) *dlogger
func (*dlogger) i64 ¶
func (l *dlogger) i64(x int64) *dlogger
func (*dlogger) i8 ¶
func (l *dlogger) i8(x int8) *dlogger
func (*dlogger) p ¶
func (l *dlogger) p(x interface{}) *dlogger
func (*dlogger) pc ¶
func (l *dlogger) pc(x uintptr) *dlogger
func (*dlogger) s ¶
func (l *dlogger) s(x string) *dlogger
func (*dlogger) traceback ¶
func (l *dlogger) traceback(x []uintptr) *dlogger
func (*dlogger) u ¶
func (l *dlogger) u(x uint) *dlogger
func (*dlogger) u16 ¶
func (l *dlogger) u16(x uint16) *dlogger
func (*dlogger) u32 ¶
func (l *dlogger) u32(x uint32) *dlogger
func (*dlogger) u64 ¶
func (l *dlogger) u64(x uint64) *dlogger
func (*dlogger) u8 ¶
func (l *dlogger) u8(x uint8) *dlogger
func (*dlogger) uptr ¶
func (l *dlogger) uptr(x uintptr) *dlogger
type eface ¶
type eface struct { _type *_type data unsafe.Pointer }
func convT2E ¶
func convT2E(t *_type, elem unsafe.Pointer) (e eface)
func convT2Enoptr ¶
func convT2Enoptr(t *_type, elem unsafe.Pointer) (e eface)
func efaceOf ¶
func efaceOf(ep *interface{}) *eface
type elfDyn ¶
type elfDyn struct { d_tag int64 /* Dynamic entry type */ d_val uint64 /* Integer value */ }
type elfEhdr ¶
type elfEhdr struct { e_ident [_EI_NIDENT]byte /* Magic number and other info */ e_type uint16 /* Object file type */ e_machine uint16 /* Architecture */ e_version uint32 /* Object file version */ e_entry uint64 /* Entry point virtual address */ e_phoff uint64 /* Program header table file offset */ e_shoff uint64 /* Section header table file offset */ e_flags uint32 /* Processor-specific flags */ e_ehsize uint16 /* ELF header size in bytes */ e_phentsize uint16 /* Program header table entry size */ e_phnum uint16 /* Program header table entry count */ e_shentsize uint16 /* Section header table entry size */ e_shnum uint16 /* Section header table entry count */ e_shstrndx uint16 /* Section header string table index */ }
type elfPhdr ¶
type elfPhdr struct { p_type uint32 /* Segment type */ p_flags uint32 /* Segment flags */ p_offset uint64 /* Segment file offset */ p_vaddr uint64 /* Segment virtual address */ p_paddr uint64 /* Segment physical address */ p_filesz uint64 /* Segment size in file */ p_memsz uint64 /* Segment size in memory */ p_align uint64 /* Segment alignment */ }
type elfShdr ¶
type elfShdr struct { sh_name uint32 /* Section name (string tbl index) */ sh_type uint32 /* Section type */ sh_flags uint64 /* Section flags */ sh_addr uint64 /* Section virtual addr at execution */ sh_offset uint64 /* Section file offset */ sh_size uint64 /* Section size in bytes */ sh_link uint32 /* Link to another section */ sh_info uint32 /* Additional section information */ sh_addralign uint64 /* Section alignment */ sh_entsize uint64 /* Entry size if section holds table */ }
type elfSym ¶
type elfSym struct { st_name uint32 st_info byte st_other byte st_shndx uint16 st_value uint64 st_size uint64 }
type elfVerdaux ¶
type elfVe