Black Lives Matter. Support the Equal Justice Initiative.

Source file src/runtime/heapdump.go

Documentation: runtime

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Implementation of runtime/debug.WriteHeapDump. Writes all
     6  // objects in the heap plus additional info (roots, threads,
     7  // finalizers, etc.) to a file.
     8  
     9  // The format of the dumped file is described at
    10  // https://golang.org/s/go15heapdump.
    11  
    12  package runtime
    13  
    14  import (
    15  	"runtime/internal/sys"
    16  	"unsafe"
    17  )
    18  
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
// runtime_debug_WriteHeapDump is the implementation of
// runtime/debug.WriteHeapDump. It stops the world, writes the heap
// dump to the file descriptor fd, and restarts the world.
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	// Keep m on this G's stack instead of the system stack.
	// Both readmemstats_m and writeheapdump_m have pretty large
	// peak stack depths and we risk blowing the system stack.
	// This is safe because the world is stopped, so we don't
	// need to worry about anyone shrinking and therefore moving
	// our stack.
	var m MemStats
	systemstack(func() {
		// Call readmemstats_m here instead of deeper in
		// writeheapdump_m because we might blow the system stack
		// otherwise.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld()
}
    40  
// Field kinds and record tags used in the dump file format.
// These values are part of the on-disk format described at
// https://golang.org/s/go15heapdump and must not be renumbered.
const (
	fieldKindEol       = 0 // terminates a field list
	fieldKindPtr       = 1 // field holds a plain pointer
	fieldKindIface     = 2
	fieldKindEface     = 3
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
    65  
var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer reused by makeheapobjbv; freed at the end of the dump.

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte // pending bytes not yet written to dumpfd
var nbuf uintptr      // number of valid bytes in buf
    76  
// dwrite appends len bytes starting at data to the dump, buffering
// through buf and flushing to dumpfd as needed.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	// Fast path: the data fits in the remaining buffer space.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// Flush the pending buffer first, then either write the data
	// directly (when it is at least a full buffer's worth) or start
	// refilling the buffer with it.
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}
    96  
    97  func dwritebyte(b byte) {
    98  	dwrite(unsafe.Pointer(&b), 1)
    99  }
   100  
// flush writes any buffered dump data to dumpfd and empties the buffer.
func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}
   105  
// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256 // number of buckets; must be a power of two (hash is masked with typeCacheBuckets-1)
	typeCacheAssoc   = 4   // entries per bucket
)

// typeCacheBucket is one associativity set of the type cache,
// ordered most recently used first.
type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket
   122  
   123  // dump a uint64 in a varint format parseable by encoding/binary
   124  func dumpint(v uint64) {
   125  	var buf [10]byte
   126  	var n int
   127  	for v >= 0x80 {
   128  		buf[n] = byte(v | 0x80)
   129  		n++
   130  		v >>= 7
   131  	}
   132  	buf[n] = byte(v)
   133  	n++
   134  	dwrite(unsafe.Pointer(&buf), uintptr(n))
   135  }
   136  
   137  func dumpbool(b bool) {
   138  	if b {
   139  		dumpint(1)
   140  	} else {
   141  		dumpint(0)
   142  	}
   143  }
   144  
// dumpmemrange dumps a varint uint64 length followed by the len bytes
// of memory starting at data.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}
   150  
   151  func dumpslice(b []byte) {
   152  	dumpint(uint64(len(b)))
   153  	if len(b) > 0 {
   154  		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
   155  	}
   156  }
   157  
// dumpstr writes s to the dump as a varint length followed by the
// string bytes (no copy; dumps directly from the string's data).
func dumpstr(s string) {
	sp := stringStructOf(&s)
	dumpmemrange(sp.str, uintptr(sp.len))
}
   162  
// dumptype dumps information for a type, at most once per type.
// A small per-bucket MRU cache (typecache) suppresses duplicates;
// on bucket overflow a type may be dumped again, which is harmless.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type: tag, address, size, name, and whether objects
	// of this type are "indirect" from the GC's point of view.
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
		// No package path: dump the bare type string.
		dumpstr(t.string())
	} else {
		// Dump the name as "pkgpath.name" without building an
		// intermediate string (length, then the two pieces and a dot).
		pkgpathstr := t.nameOff(x.pkgpath).name()
		pkgpath := stringStructOf(&pkgpathstr)
		namestr := t.name()
		name := stringStructOf(&namestr)
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
}
   211  
// dumpobj dumps a heap object: its address, contents, and the kinds
// and offsets of its pointer fields as described by bv.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}
   219  
// dumpotherroot records a miscellaneous GC root: a description and the
// address it points to.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}
   225  
// dumpfinalizer records a registered finalizer: the object it is
// attached to, the finalizer closure and entry PC, and the argument
// and pointer types.
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   234  
// childInfo carries information from a callee frame up to its caller
// during the stack walk in dumpframe.
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}
   244  
   245  // dump kinds & offsets of interesting fields in bv
   246  func dumpbv(cbv *bitvector, offset uintptr) {
   247  	for i := uintptr(0); i < uintptr(cbv.n); i++ {
   248  		if cbv.ptrbit(i) == 1 {
   249  			dumpint(fieldKindPtr)
   250  			dumpint(uint64(offset + i*sys.PtrSize))
   251  		}
   252  	}
   253  }
   254  
// dumpframe dumps one stack frame. It is the callback passed to
// gentraceback by dumpgoroutine; arg points at a childInfo that
// communicates outargs layout from the callee frame to this one.
// It always returns true so the traceback continues.
func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry
	if pc != f.entry {
		// Back up to the CALL instruction so we look up the stack
		// map for the call site rather than the return address.
		pc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1 // no locals bitmap available
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.arglen
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}
   341  
// dumpgoroutine dumps one goroutine: its descriptor, every stack
// frame (via gentraceback/dumpframe), and its defer and panic records.
func dumpgoroutine(gp *g) {
	// Pick the register state to start the stack walk from: the
	// syscall-entry state if the goroutine is in a syscall, otherwise
	// its scheduler state.
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1 // topmost frame has no callee, so no args info yet
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}
   404  
   405  func dumpgs() {
   406  	// goroutines & stacks
   407  	for i := 0; uintptr(i) < allglen; i++ {
   408  		gp := allgs[i]
   409  		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
   410  		switch status {
   411  		default:
   412  			print("runtime: unexpected G.status ", hex(status), "\n")
   413  			throw("dumpgs in STW - bad status")
   414  		case _Gdead:
   415  			// ok
   416  		case _Grunnable,
   417  			_Gsyscall,
   418  			_Gwaiting:
   419  			dumpgoroutine(gp)
   420  		}
   421  	}
   422  }
   423  
// finq_callback records one queued (ready-to-run) finalizer.
// It is the callback passed to iterate_finq by dumproots.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   432  
// dumproots dumps the GC roots: the data and bss segments, registered
// finalizers on in-use spans, and the queued-finalizer list.
func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// mspan.types
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}
   468  
// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
// Reused across spans by dumpobjs; entries are cleared as they are
// consumed so it is all-false between spans.
var freemark [_PageSize / 8]bool
   472  
// dumpobjs dumps every allocated (non-free) object in every in-use span.
func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		// Mark which slots in the span are free.
		for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(freeIndex) {
				freemark[freeIndex] = true
			}
		}

		// Dump each allocated slot, resetting freemark entries as we
		// go so the array is clean for the next span.
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
   503  
// dumpparams dumps the tagParams record: byte order, pointer size,
// the address range spanned by the heap arenas, GOARCH, the
// experiment string, and ncpu.
func dumpparams() {
	dumpint(tagParams)
	// Probe byte order by storing 1 in a uintptr and inspecting its
	// first byte.
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(sys.PtrSize)
	// Compute the lowest arena start and highest arena end over all
	// mapped heap arenas.
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(sys.GOARCH)
	dumpstr(sys.Goexperiment)
	dumpint(uint64(ncpu))
}
   537  
// itab_callback dumps one itab and (first) the concrete type it
// refers to, so the type record precedes the itab record.
func itab_callback(tab *itab) {
	t := tab._type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}
   545  
// dumpitabs dumps all itabs via itab_callback.
func dumpitabs() {
	iterate_itabs(itab_callback)
}
   549  
   550  func dumpms() {
   551  	for mp := allm; mp != nil; mp = mp.alllink {
   552  		dumpint(tagOSThread)
   553  		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
   554  		dumpint(uint64(mp.id))
   555  		dumpint(mp.procid)
   556  	}
   557  }
   558  
//go:systemstack
// dumpmemstats dumps the tagMemStats record from the already-collected
// stats in m.
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}
   596  
// dumpmemprof_callback dumps one memory-profile bucket: its address,
// sampled size, and call stack (with symbolized frames where
// possible), followed by its alloc/free counts. It is the callback
// passed to iterate_memprof by dumpmemprof.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// Unknown PC: format "(0x<hex pc>)" by writing the digits
			// backwards from the end of a scratch buffer, then dump it
			// with a "?" name and line 0.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			// For frames other than the leaf, back up to the CALL
			// instruction before looking up file/line.
			if i > 0 && pc > f.entry {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
   643  
// dumpmemprof dumps the memory profile: every profile bucket, then an
// allocation-sample record linking each profiled object to its bucket.
func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}
   665  
// dumphdr is the magic header that identifies a heap dump file.
var dumphdr = []byte("go1.7 heap dump\n")

// mdump writes the heap dump itself: the header followed by each
// record section in order, ending with tagEOF and a buffer flush.
func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	// Start with an empty type cache so every type gets dumped at
	// least once in this dump.
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}
   690  
// writeheapdump_m prepares the dump state, writes the dump to fd via
// mdump, and releases the temporary buffer. Must run with the world
// stopped; m holds stats collected by the caller.
func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	// Park the current user goroutine in _Gwaiting while dumping.
	_g_ := getg()
	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
	_g_.waitreason = waitReasonDumpingHeap

	// Update stats so we can dump them.
	// As a side effect, flushes all the mcaches so the mspan.freelist
	// lists contain all the free objects.
	updatememstats()

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		// Free the scratch buffer allocated by makeheapobjbv.
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
}
   718  
// dumpint() the kind & offset of each field in an object,
// terminated by fieldKindEol.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
   724  
// makeheapobjbv builds a pointer bitmap (bitvector) for the object of
// the given size at address p, by converting the heap bitmap. The
// returned bitvector's storage is the package-level tmpbuf, which is
// reused across calls and freed at the end of the dump.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / sys.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	i := uintptr(0)
	hbits := heapBitsForAddr(p)
	for ; i < nptr; i++ {
		if !hbits.morePointers() {
			break // end of object
		}
		if hbits.isPointer() {
			tmpbuf[i/8] |= 1 << (i % 8)
		}
		hbits = hbits.next()
	}
	// i is the number of words actually described; trailing words past
	// the last pointer word are omitted from the bitvector.
	return bitvector{int32(i), &tmpbuf[0]}
}
   756  

View as plain text