// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// A byte is used for the event type, but only 6 bits are available,
	// because the remaining 2 bits specify the number of arguments.
	// That means the max event type value is 63.
)

const (
	// Timestamps in the trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than the whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes needed to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine was scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start sequence number
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to manage string IDs, expected to become a
	// bottleneck if traces are big or have lots of unique strings.
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.pos
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and they are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	//
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a P starts running
	// right before we stop the world and can't block in the middle
	// of the STW. See #19938, #19926).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
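
// Editorial sketch (not part of the original source): the public runtime/trace
// package drives StartTrace, ReadTrace and StopTrace in essentially this shape:
//
//	if err := runtime.StartTrace(); err != nil {
//		return err
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				return // tracing stopped and all buffered data consumed
//			}
//			w.Write(data) // w is some io.Writer chosen by the caller
//		}
//	}()
//	// ... run the workload to be traced ...
//	runtime.StopTrace()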

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}

	// Recycle the buffer returned by the previous call.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}

	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}

	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}

	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}

	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// The stack dump emits a bunch of full buffers; subsequent
		// ReadTrace calls will return them.
		trace.stackTab.dump()
		return data
	}

	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}

	// Also trace.enabled is already reset and so we can't start a new trace reader.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
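
// Editorial summary of the code above: taken together, the stream ReadTrace
// produces is the header string "go 1.11 trace\x00\x00\x00", then the per-P
// batches queued by traceFlush, then a traceEvFrequency footer followed by the
// buffers emitted by the stack table dump, and finally nil once shutdown
// completes.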

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did
	// mp.locks++, StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks
	// to go back to zero, so if we see trace.enabled == true now, we know it's
	// true for the rest of the function. Exitsyscall can run even during
	// stopTheWorld. The race with StartTrace/StopTrace during tracing in
	// exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If narg is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
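
// Worked example (editorial, not part of the original source): traceGoSched
// below calls traceEvent(traceEvGoSched, 1) with no explicit args. Since
// skip >= 0, the stack id counts as one argument, so narg = 1 and the first
// byte is traceEvGoSched|1<<traceArgCountShift = 17|64 = 0x51, followed by
// the varint-encoded tick delta and the varint-encoded stack id.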

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to the trace.strings dictionary and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// Memory allocation above may trigger tracing and
	// cause *bufp changes. The following code now works with *bufp,
	// so there must be no memory allocation or calls to traceable functions.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// Double-check that the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
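
// For illustration (editorial note): a dictionary entry for "GC" assigned
// id 5 is encoded as the bytes {traceEvString, 0x05, 0x02, 'G', 'C'}: the
// event byte with zero in the argument-count bits, then the varint id, the
// varint length, and the raw string bytes.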

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
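
// Example (editorial): traceAppend(nil, 300) produces {0xAC, 0x02}, since
// 300 = 0x12C; the low 7 bits 0x2C are emitted with the continuation bit set
// (0x80|0x2C = 0xAC) and the remaining value 2 is emitted as the final byte.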

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
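
// Design note (editorial): put uses double-checked locking. The optimistic
// find above runs without the mutex; that is safe because new records are
// published to the bucket head with atomicstorep, so a concurrent reader sees
// either the old list or a fully initialized new record.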

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called at least once.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace.
		// The recorded sysexitticks must be treated as "best effort". If they
		// are valid for this trace, use them for greater precision. But if
		// they're not valid for this trace, assume that the trace was started
		// after the actual syscall exit (but before we actually managed to
		// start the goroutine, aka right now), and assign a fresh time stamp
		// to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if nextGC := atomic.Load64(&memstats.next_gc); nextGC == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, nextGC)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for the message and its length
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// Double-check that the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
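
// User-side sketch (editorial): the trace_user* hooks above back the public
// annotation API in runtime/trace, reached via go:linkname. For example:
//
//	ctx, task := trace.NewTask(context.Background(), "awesomeTask")
//	defer task.End()
//	trace.WithRegion(ctx, "region", func() {
//		trace.Log(ctx, "category", "message")
//	})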