Source file src/runtime/debuglog.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file provides an internal debug logging facility. The debug
// log is a lightweight, in-memory, per-M ring buffer. By default, the
// runtime prints the debug log on panic.
//
// To print something to the debug log, call dlog to obtain a dlogger
// and use the methods on that to add values. The values will be
// space-separated in the output (much like println).
//
// This facility can be enabled by passing -tags debuglog when
// building. Without this tag, dlog calls compile to nothing.
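//
// A typical call chains value methods and finishes with end(). For
// example (an illustrative sketch; the message and values here are
// hypothetical):
//
//	dlog().s("scav").i(npages).hex(uint64(base)).end()
//
// would appear in the printed log as something like "scav 2 0xc000100000".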

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
const debugLogStringLimit = debugLogBytes / 8

// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
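//
// For example (a hypothetical call site; expensiveStat is not a real
// runtime function):
//
//	if dlogEnabled {
//		dlog().s("gc stat").u64(expensiveStat()).end()
//	}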
//
//go:nosplit
//go:nowritebarrierrec
func dlog() *dlogger {
	if !dlogEnabled {
		return nil
	}

	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{})))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dlogger)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
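	// (Each of the two deltas is a uvarint, so 3 bytes carry 3*7 = 21
	// bits, and 2^21-1 CPU ticks is on the order of 2ms at GHz tick
	// rates.)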
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header.
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}

// A dlogger writes to the debug log.
//
// To obtain a dlogger, call dlog(). When done with the dlogger, call
// end().
type dlogger struct {
	_ sys.NotInHeap
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dlogger

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically.
	owned atomic.Uint32
}

// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
var allDloggers *dlogger

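// end finishes the record begun by dlog, fills in its framing header, and
// returns the logger to the per-M cache or, failing that, to the global pool.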
//go:nosplit
func (l *dlogger) end() {
	if !dlogEnabled {
		return
	}

	// Fill in framing header.
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool.
	l.owned.Store(0)
}

const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt
	debugLogUint
	debugLogHex
	debugLogPtr
	debugLogString
	debugLogConstString
	debugLogStringOverflow

	debugLogPC
	debugLogTraceback
)

//go:nosplit
func (l *dlogger) b(x bool) *dlogger {
	if !dlogEnabled {
		return l
	}
	if x {
		l.w.byte(debugLogBoolTrue)
	} else {
		l.w.byte(debugLogBoolFalse)
	}
	return l
}

//go:nosplit
func (l *dlogger) i(x int) *dlogger {
	return l.i64(int64(x))
}

//go:nosplit
func (l *dlogger) i8(x int8) *dlogger {
	return l.i64(int64(x))
}

//go:nosplit
func (l *dlogger) i16(x int16) *dlogger {
	return l.i64(int64(x))
}

//go:nosplit
func (l *dlogger) i32(x int32) *dlogger {
	return l.i64(int64(x))
}

//go:nosplit
func (l *dlogger) i64(x int64) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogInt)
	l.w.varint(x)
	return l
}

//go:nosplit
func (l *dlogger) u(x uint) *dlogger {
	return l.u64(uint64(x))
}

//go:nosplit
func (l *dlogger) uptr(x uintptr) *dlogger {
	return l.u64(uint64(x))
}

//go:nosplit
func (l *dlogger) u8(x uint8) *dlogger {
	return l.u64(uint64(x))
}

//go:nosplit
func (l *dlogger) u16(x uint16) *dlogger {
	return l.u64(uint64(x))
}

//go:nosplit
func (l *dlogger) u32(x uint32) *dlogger {
	return l.u64(uint64(x))
}

//go:nosplit
func (l *dlogger) u64(x uint64) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogUint)
	l.w.uvarint(x)
	return l
}

//go:nosplit
func (l *dlogger) hex(x uint64) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogHex)
	l.w.uvarint(x)
	return l
}

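// p logs x as a pointer. x must have a pointer-shaped kind (chan, func,
// map, pointer, or unsafe.Pointer); any other kind throws.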
//go:nosplit
func (l *dlogger) p(x any) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.Kind_ & kindMask {
		case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}

//go:nosplit
func (l *dlogger) s(x string) *dlogger {
	if !dlogEnabled {
		return l
	}

	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			l.w.byte(debugLogStringOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}

//go:nosplit
func (l *dlogger) pc(x uintptr) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogPC)
	l.w.uvarint(uint64(x))
	return l
}

//go:nosplit
func (l *dlogger) traceback(x []uintptr) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogTraceback)
	l.w.uvarint(uint64(len(x)))
	for _, pc := range x {
		l.w.uvarint(uint64(pc))
	}
	return l
}

// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
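//
// Concretely, a record is laid out roughly as follows (sizes in bytes;
// the size field counts the whole record, header included):
//
//	[2]   size, little-endian uint16 (0 indicates a sync record)
//	[1-3] tick delta, uvarint since the last sync record
//	[1-3] nano delta, uvarint
//	[1+]  P ID, varint (-1 if unknown)
//	...   fields, each a type byte followed by type-specific data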
type debugLogWriter struct {
	_     sys.NotInHeap
	write uint64
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}

type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}

const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record.
	debugLogSyncSize = debugLogHeaderSize + 2*8
)

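// ensure makes room to write n more bytes by advancing the reader past
// any records that would otherwise be overwritten.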
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}

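// writeFrameAt writes the 16-bit little-endian framing header at log
// position pos and reports whether size fit in the 16-bit size field.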
//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
	return size <= 0xFFFF
}

//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	l.r.end = l.write
}

//go:nosplit
func (l *debugLogWriter) writeUint64LE(x uint64) {
	var b [8]byte
	b[0] = byte(x)
	b[1] = byte(x >> 8)
	b[2] = byte(x >> 16)
	b[3] = byte(x >> 24)
	b[4] = byte(x >> 32)
	b[5] = byte(x >> 40)
	b[6] = byte(x >> 48)
	b[7] = byte(x >> 56)
	l.bytes(b[:])
}

//go:nosplit
func (l *debugLogWriter) byte(x byte) {
	l.ensure(1)
	pos := l.write
	l.write++
	l.data.b[pos%uint64(len(l.data.b))] = x
}

//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
	l.ensure(uint64(len(x)))
	pos := l.write
	l.write += uint64(len(x))
	for len(x) > 0 {
		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
		pos += uint64(n)
		x = x[n:]
	}
}

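// varint writes x zig-zag encoded: the sign goes in bit 0 and the
// magnitude (complemented for negative values) in the remaining bits, so
// small magnitudes stay small. For example, 3 encodes as uvarint 6 and
// -3 encodes as uvarint 5.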
//go:nosplit
func (l *debugLogWriter) varint(x int64) {
	var u uint64
	if x < 0 {
		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
	} else {
		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
	}
	l.uvarint(u)
}

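// uvarint writes u in base-128 varint encoding: 7 bits per byte, least
// significant group first, with the high bit set on every byte but the last.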
//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
	i := 0
	for u >= 0x80 {
		l.buf[i] = byte(u) | 0x80
		u >>= 7
		i++
	}
	l.buf[i] = byte(u)
	i++
	l.bytes(l.buf[:i])
}

type debugLogReader struct {
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}

//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}

//go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
}

//go:nosplit
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
	var b [8]byte
	for i := range b {
		b[i] = r.data.b[pos%uint64(len(r.data.b))]
		pos++
	}
	return uint64(b[0]) | uint64(b[1])<<8 |
		uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 |
		uint64(b[6])<<48 | uint64(b[7])<<56
}

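// peek returns the tick of the next record in the log, consuming any
// sync records it encounters along the way, or ^uint64(0) if no complete
// record remains.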
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	if pos > r.begin+size {
		return ^uint64(0)
	}
	return r.tick + u
}

func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}

func (r *debugLogReader) uvarint() uint64 {
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[r.begin%uint64(len(r.data.b))]
		r.begin++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	return u
}

func (r *debugLogReader) varint() int64 {
	u := r.uvarint()
	var v int64
	if u&1 == 0 {
		v = int64(u >> 1)
	} else {
		v = ^int64(u >> 1)
	}
	return v
}

func (r *debugLogReader) printVal() bool {
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogStringOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}

// printDebugLog prints the debug log.
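//
// Records from all per-M logs are merged and printed in tick order. Each
// record prints roughly as
//
//	[1.234567890 P 3] value value ...
//
// where the timestamp is seconds since runtime init, and each log is
// introduced by a ">> begin log N <<" line the first time it is printed
// from.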
func printDebugLog() {
	if !dlogEnabled {
		return
	}

	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool
		lost     uint64
		nextTick uint64
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records.
	for {
		// Find the next record.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}

// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	if returnPC && (!fn.valid() || pc > fn.entry()) {
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry()),
			" ", file, ":", line, "]")
	}
}
