Source file src/runtime/heapdump.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"unsafe"
)

//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stw := stopTheWorld(stwWriteHeapDump)

	// Keep m on this G's stack instead of the system stack.
	// Both readmemstats_m and writeheapdump_m have pretty large
	// peak stack depths and we risk blowing the system stack.
	// This is safe because the world is stopped, so we don't
	// need to worry about anyone shrinking and therefore moving
	// our stack.
	var m MemStats
	systemstack(func() {
		// Call readmemstats_m here instead of deeper in
		// writeheapdump_m because we might blow the system stack
		// otherwise.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld(stw)
}
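
// A minimal sketch of how a program requests a dump through the public
// wrapper runtime/debug.WriteHeapDump (the output path is illustrative):
//
//	f, err := os.Create("/tmp/heapdump")
//	if err != nil {
//		panic(err)
//	}
//	debug.WriteHeapDump(f.Fd())
//	f.Close()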

const (
	fieldKindEol       = 0
	fieldKindPtr       = 1
	fieldKindIface     = 2
	fieldKindEface     = 3
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
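
// Each record in the dump is one of the tags above, written as a varint
// and followed by tag-specific fields; the stream ends with tagEOF.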

var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr

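// dwrite appends the len bytes starting at data to the output buffer,
// flushing it to dumpfd whenever it would overflow; writes of bufSize
// bytes or more bypass the buffer and go straight to the file.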
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary.
func dumpint(v uint64) {
	var buf [10]byte
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80)
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}
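
// For example, dumpint(300) emits the two bytes 0xac 0x02, which the
// reader side can decode with encoding/binary:
//
//	v, n := binary.Uvarint([]byte{0xac, 0x02}) // v == 300, n == 2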

func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

func dumpstr(s string) {
	dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

// dump information for a type.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.Hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.Size_))
	rt := toRType(t)
	if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" {
		dumpstr(rt.string())
	} else {
		pkgpath := rt.nameOff(x.PkgPath).Name()
		name := rt.name()
		dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
		dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
	}
	dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0)
}

// dump an object.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv.
func dumpbv(cbv *bitvector, offset uintptr) {
	for i := uintptr(0); i < uintptr(cbv.n); i++ {
		if cbv.ptrbit(i) == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*goarch.PtrSize))
		}
	}
}
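
// For example, a bitvector with n == 3 and bits 0b101 describes three
// words of which the first and third hold pointers; dumpbv reports them
// at offsets offset+0 and offset+2*goarch.PtrSize.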

func dumpframe(s *stkframe, child *childInfo) {
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry
	if pc != f.entry() {
		pc--
		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue; assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry()))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information; dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information; dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.argBytes()
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return
}
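
// Frames are dumped innermost-first. Each call leaves this frame's
// argument map in child, so the next (outer) frame can label the outargs
// area it shares with the callee just dumped.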

func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(gp.goid)
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	var u unwinder
	for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() {
		dumpframe(&u.frame, &child)
	}

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(eface.data)))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}

func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// Finalizer specials attached to objects in in-use spans.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool

func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(uintptr(freeIndex)) {
				freemark[freeIndex] = true
			}
		}

		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
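
// Note that the inner loop above clears each freemark entry as it is
// consumed, so the array is all-false again before the next span is
// scanned.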

func dumpparams() {
	dumpint(tagParams)
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(ncpu))
}
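
// The endianness probe in dumpparams stores uintptr(1) and inspects its
// first byte: on a little-endian machine such as amd64 that byte is 1;
// on a big-endian machine it is 0.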

func itab_callback(tab *itab) {
	t := tab._type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}

func dumpitabs() {
	iterate_itabs(itab_callback)
}

func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}

//go:systemstack
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These fields must match the exported MemStats structure
	// and must be written in the same order.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}
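
// MemStats.PauseNs is a circular buffer of recent GC pause times, so all
// 256 slots are written out regardless of how many collections have run.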

func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
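
// The "(0x...)" rendering of unresolvable PCs above is assembled by hand
// because this code runs inside the runtime with the world stopped and
// cannot allocate or call into fmt.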

func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")

func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	gp := getg()
	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}

func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	if goexperiment.AllocHeaders {
		s := spanOf(p)
		tp := s.typePointersOf(p, size)
		for {
			var addr uintptr
			if tp, addr = tp.next(p + size); addr == 0 {
				break
			}
			i := (addr - p) / goarch.PtrSize
			tmpbuf[i/8] |= 1 << (i % 8)
		}
	} else {
		hbits := heapBitsForAddr(p, size)
		for {
			var addr uintptr
			hbits, addr = hbits.next()
			if addr == 0 {
				break
			}
			i := (addr - p) / goarch.PtrSize
			tmpbuf[i/8] |= 1 << (i % 8)
		}
	}
	return bitvector{int32(nptr), &tmpbuf[0]}
}
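
// For example, on a 64-bit machine a four-word object whose only pointer
// is in word 1 yields tmpbuf[0] == 0x02, and dumpfields then reports a
// single fieldKindPtr at offset 8.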
