Source file src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in less number of bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 2GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)
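
// Worked example of the event encoding (illustrative sketch, not part of the
// original source): traceEvent(traceEvProcStart, -1, uint64(mid)) produces,
// via traceEventLocked below,
//
//	[traceEvProcStart | 1<<traceArgCountShift]  one byte: event type, narg=1
//	[varint tick delta]                         always written, not counted in narg
//	[varint mid]                                the single explicit argument
//
// narg counts the explicit arguments plus a trailing stack id if skip >= 0;
// it is capped at 3, and whenever narg == 3 a length byte follows the event
// type byte instead of relying on an implicit argument count.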

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start sequence number
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// stopTheWorldGC (rather than stopTheWorld) also keeps a GC
	// from starting while tracing is being enabled.
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// Instead, mark this m as starting the trace; traceEvent accepts events
	// from this m (and only this m) before trace.enabled is set.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// Reset the string-to-ID mapping.
	// ID 0 is reserved for the empty string; real IDs start at 1.
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
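
// Illustrative sketch (not part of the original source): StartTrace is rarely
// called directly. The runtime/trace package wraps it together with a
// goroutine that drains ReadTrace into an io.Writer, roughly:
//
//	f, err := os.Create("trace.out") // hypothetical output file
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := trace.Start(f); err != nil { // calls runtime.StartTrace
//		log.Fatal(err)
//	}
//	defer trace.Stop() // calls runtime.StopTrace and waits for the reader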

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
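
// Illustrative sketch (not part of the original source): a conforming reader,
// like the goroutine runtime/trace.Start spawns, drains ReadTrace until it
// returns nil, writing out each chunk before the next call:
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // StopTrace was called and all buffers are drained
//		}
//		w.Write(data) // w is an assumed io.Writer
//	}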

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.buf in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that causes tracing after this point.
	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
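
// Worked example (illustrative, not part of the original source): the
// little-endian base-128 (LEB128-style) encoding used by traceAppend and
// traceBuf.varint emits 7 payload bits per byte, with the high bit set on
// every byte except the last. For v = 300 (binary 1_0010_1100):
//
//	traceAppend(nil, 300) // returns []byte{0xAC, 0x02}
//
// 0xAC carries the low 7 bits (0101100) plus the continuation bit; 0x02
// carries the remaining bits (10).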

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called at least once.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
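
// Illustrative sketch (not part of the original source): the linknamed entry
// points above back the public user-annotation API in runtime/trace, e.g.:
//
//	ctx, task := trace.NewTask(ctx, "makeCake") // -> trace_userTaskCreate
//	defer task.End()                            // -> trace_userTaskEnd
//	trace.WithRegion(ctx, "mixing", mix)        // -> trace_userRegion
//	trace.Log(ctx, "oven", "preheated")         // -> trace_userLog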