src/runtime/mgcmark.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Garbage collector: marking and scanning
package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
const (
	// Fixed root job indices, scanned by markroot before the
	// indexed data, BSS, span, and stack roots.
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects are split up into "oblets" of at most
	// this size, which bounds how long a single scan can delay
	// preemption.
	//
	// This must be larger than the maximum small object size so
	// that the object base is always the span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of scan work to
	// do between self-preemption checks in gcDrain. Lower values
	// have higher overhead in the scan loop; higher values make the
	// system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span
	// root at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512

	// go115NewMarkrootSpans selects the new span root marking
	// implementation, which scans the heap arena bitmaps for spans
	// with specials instead of walking sweepSpans.
	go115NewMarkrootSpans = true
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	work.nFlushCacheRoots = 0

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals. The root count is the maximum block count over
	// all modules; markroot scans the corresponding block of every
	// module for each job.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get finalizers
	// after root marking.
	if go115NewMarkrootSpans {
		// Scan the whole heap that was available when the mark
		// phase started for in-use spans that have specials. Break
		// the work up by arena and then into pagesPerSpanRoot
		// chunks. Snapshot allArenas as markArenas; this is safe
		// because allArenas is append-only.
		mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
		work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
	} else {
		// Only in-use spans are interesting, and they will all have
		// been swept at this point. More spans may be added during
		// concurrent GC, but we only care about spans that were
		// allocated before this mark phase.
		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
	}

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during the
	// concurrent phase will be scanned during mark termination.
	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	for i := 0; i < work.nStackRoots; i++ {
		gp = allgs[i]
		if !gp.gcscandone {
			goto fail
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone)
	unlock(&allglock)
	throw("scan missed a g")
}

// oneptrmask is the pointer mask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
func markroot(gcw *gcWork, i uint32) {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case baseSpans <= i && i < baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// Remember when we first observed the G blocked. This is
		// needed only for traceback output.
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}

			// Suspend the goroutine and scan its stack. suspendG
			// blocks until gp stops, which may take a while for
			// running goroutines.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around. These tests
	// are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
func markrootSpans(gcw *gcWork, shard int) {
	if !go115NewMarkrootSpans {
		oldMarkrootSpans(gcw, shard)
		return
	}
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct a slice of the specials bitmap to iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is
			// set, so sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// Don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

				// Mark everything that can be reached from
				// the object (but *not* the object itself or
				// we'll never collect it).
				scanobject(p, gcw)

				// The special itself is a root.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
			}
			unlock(&s.speciallock)
		}
	}
}

// oldMarkrootSpans marks roots for one shard of work.spans.
//
// For go115NewMarkrootSpans = false.
func oldMarkrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	sg := mheap_.sweepgen
	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans must
	// have been allocated and given finalizers after we entered
	// the scan phase, so addfinalizer will have ensured the above
	// invariants for them.
	for i := 0; i < len(spans); i++ {
		// sweepBuf.block requires that we read pointers from the
		// block atomically. It also requires that we ignore nil
		// pointers.
		s := (*mspan)(atomic.Loadp(unsafe.Pointer(&spans[i])))

		// This is racing with spans being initialized, so
		// check the state carefully.
		if s == nil || s.state.get() != mSpanInUse {
			continue
		}
		// Check that this span was swept (it may be cached or uncached).
		if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also race with the
		// removal of a special, which still ensures the above
		// invariants.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// Don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object, find object beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
		}

		unlock(&s.speciallock)
	}
}
482
483
484
485
486
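// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.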
487 func gcAssistAlloc(gp *g) {
488
489
490 if getg() == gp.m.g0 {
491 return
492 }
493 if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
494 return
495 }
496
497 traced := false
498 retry:
499
500
501
502
503 debtBytes := -gp.gcAssistBytes
504 scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
505 if scanWork < gcOverAssistWork {
506 scanWork = gcOverAssistWork
507 debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
508 }
509
510
511
512
513
514
515
516 bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
517 stolen := int64(0)
518 if bgScanCredit > 0 {
519 if bgScanCredit < scanWork {
520 stolen = bgScanCredit
521 gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
522 } else {
523 stolen = scanWork
524 gp.gcAssistBytes += debtBytes
525 }
526 atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
527
528 scanWork -= stolen
529
530 if scanWork == 0 {
531
532
533 if traced {
534 traceGCMarkAssistDone()
535 }
536 return
537 }
538 }
539
540 if trace.enabled && !traced {
541 traced = true
542 traceGCMarkAssistStart()
543 }
544
545
546 systemstack(func() {
547 gcAssistAlloc1(gp, scanWork)
548
549
550 })
551
552 completed := gp.param != nil
553 gp.param = nil
554 if completed {
555 gcMarkDone()
556 }
557
558 if gp.gcAssistBytes < 0 {
559
560
561
562
563
564
565
566 if gp.preempt {
567 Gosched()
568 goto retry
569 }
570
571
572
573
574
575
576
577
578
579
580 if !gcParkAssist() {
581 goto retry
582 }
583
584
585
586 }
587 if traced {
588 traceGCMarkAssistDone()
589 }
590 }
591
592
593
594
595
596
597
598
599
600
601
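// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.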
602 func gcAssistAlloc1(gp *g, scanWork int64) {
603
604
605 gp.param = nil
606
607 if atomic.Load(&gcBlackenEnabled) == 0 {
608
609
610
611
612
613
614
615 gp.gcAssistBytes = 0
616 return
617 }
618
619
620
621 startTime := nanotime()
622
623 decnwait := atomic.Xadd(&work.nwait, -1)
624 if decnwait == work.nproc {
625 println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
626 throw("nwait > work.nprocs")
627 }
628
629
630 casgstatus(gp, _Grunning, _Gwaiting)
631 gp.waitreason = waitReasonGCAssistMarking
632
633
634
635 gcw := &getg().m.p.ptr().gcw
636 workDone := gcDrainN(gcw, scanWork)
637
638 casgstatus(gp, _Gwaiting, _Grunning)
639
640
641
642
643
644
645
646 gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))
647
648
649
650 incnwait := atomic.Xadd(&work.nwait, +1)
651 if incnwait > work.nproc {
652 println("runtime: work.nwait=", incnwait,
653 "work.nproc=", work.nproc)
654 throw("work.nwait > work.nproc")
655 }
656
657 if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
658
659
660
661
662 gp.param = unsafe.Pointer(gp)
663 }
664 duration := nanotime() - startTime
665 _p_ := gp.m.p.ptr()
666 _p_.gcAssistTime += duration
667 if _p_.gcAssistTime > gcAssistTimeSlack {
668 atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
669 _p_.gcAssistTime = 0
670 }
671 }
672
673
674
675
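// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.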
676 func gcWakeAllAssists() {
677 lock(&work.assistQueue.lock)
678 list := work.assistQueue.q.popList()
679 injectglist(&list)
680 unlock(&work.assistQueue.lock)
681 }
682
683
684
685
686
687
688
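// gcParkAssist puts the current goroutine on the assist queue and
// parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.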
689 func gcParkAssist() bool {
690 lock(&work.assistQueue.lock)
691
692
693
694 if atomic.Load(&gcBlackenEnabled) == 0 {
695 unlock(&work.assistQueue.lock)
696 return true
697 }
698
699 gp := getg()
700 oldList := work.assistQueue.q
701 work.assistQueue.q.pushBack(gp)
702
703
704
705
706
707 if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
708 work.assistQueue.q = oldList
709 if oldList.tail != 0 {
710 oldList.tail.ptr().schedlink.set(nil)
711 }
712 unlock(&work.assistQueue.lock)
713 return false
714 }
715
716 goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
717 return true
718 }
719
720
721
722
723
724
725
726
727
728
729
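// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.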
730 func gcFlushBgCredit(scanWork int64) {
731 if work.assistQueue.q.empty() {
732
733
734
735
736 atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
737 return
738 }
739
740 scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)
741
742 lock(&work.assistQueue.lock)
743 for !work.assistQueue.q.empty() && scanBytes > 0 {
744 gp := work.assistQueue.q.pop()
745
746
747 if scanBytes+gp.gcAssistBytes >= 0 {
748
749 scanBytes += gp.gcAssistBytes
750 gp.gcAssistBytes = 0
751
752
753
754
755
756
757 ready(gp, 0, false)
758 } else {
759
760 gp.gcAssistBytes += scanBytes
761 scanBytes = 0
762
763
764
765
766 work.assistQueue.q.pushBack(gp)
767 break
768 }
769 }
770
771 if scanBytes > 0 {
772
773 scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
774 atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
775 }
776 unlock(&work.assistQueue.lock)
777 }
778
779
780
781
782
783
784
785
786
787
788
789
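// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack must not be preempted while it is using a gcWork, so it
// is only called from the system stack (see markroot).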
790 func scanstack(gp *g, gcw *gcWork) {
791 if readgstatus(gp)&_Gscan == 0 {
792 print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
793 throw("scanstack - bad status")
794 }
795
796 switch readgstatus(gp) &^ _Gscan {
797 default:
798 print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
799 throw("mark - bad status")
800 case _Gdead:
801 return
802 case _Grunning:
803 print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
804 throw("scanstack: goroutine not stopped")
805 case _Grunnable, _Gsyscall, _Gwaiting:
806
807 }
808
809 if gp == getg() {
810 throw("can't scan our own stack")
811 }
812
813 if isShrinkStackSafe(gp) {
814
815 shrinkstack(gp)
816 } else {
817
818 gp.preemptShrink = true
819 }
820
821 var state stackScanState
822 state.stack = gp.stack
823
824 if stackTraceDebug {
825 println("stack trace goroutine", gp.goid)
826 }
827
828 if debugScanConservative && gp.asyncSafePoint {
829 print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
830 }
831
832
833
834
835 if gp.sched.ctxt != nil {
836 scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
837 }
838
839
840 scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
841 scanframeworker(frame, &state, gcw)
842 return true
843 }
844 gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
845
846
847
848
849
850 tracebackdefers(gp, scanframe, nil)
851
852
853 for d := gp._defer; d != nil; d = d.link {
854 if d.fn != nil {
855
856
857 scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
858 }
859 if d.link != nil {
860
861
862 scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
863 }
864
865
866
867 if d.heap {
868 scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
869 }
870 }
871 if gp._panic != nil {
872
873 state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
874 }
875
876
877
878
879
880
881 state.buildIndex()
882 for {
883 p, conservative := state.getPtr()
884 if p == 0 {
885 break
886 }
887 obj := state.findObject(p)
888 if obj == nil {
889 continue
890 }
891 t := obj.typ
892 if t == nil {
893
894 continue
895 }
896 obj.setType(nil)
897 if stackTraceDebug {
898 printlock()
899 print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string())
900 if conservative {
901 print(" (conservative)")
902 }
903 println()
904 printunlock()
905 }
906 gcdata := t.gcdata
907 var s *mspan
908 if t.kind&kindGCProg != 0 {
909
910
911
912
913
914
915
916
917
918 s = materializeGCProg(t.ptrdata, gcdata)
919 gcdata = (*byte)(unsafe.Pointer(s.startAddr))
920 }
921
922 b := state.stack.lo + uintptr(obj.off)
923 if conservative {
924 scanConservative(b, t.ptrdata, gcdata, gcw, &state)
925 } else {
926 scanblock(b, t.ptrdata, gcdata, gcw, &state)
927 }
928
929 if s != nil {
930 dematerializeGCProg(s)
931 }
932 }
933
934
935
936 for state.head != nil {
937 x := state.head
938 state.head = x.next
939 if stackTraceDebug {
940 for i := 0; i < x.nobj; i++ {
941 obj := &x.obj[i]
942 if obj.typ == nil {
943 continue
944 }
945 println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string())
946
947 }
948 }
949 x.nobj = 0
950 putempty((*workbuf)(unsafe.Pointer(x)))
951 }
952 if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
953 throw("remaining pointer buffers")
954 }
955 }
956
957
958
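// scanframeworker scans a single stack frame: its local variables,
// function arguments/results, and any stack objects it declares.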
959 func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
960 if _DebugGC > 1 && frame.continpc != 0 {
961 print("scanframe ", funcname(frame.fn), "\n")
962 }
963
964 isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
965 isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV1
966 if state.conservative || isAsyncPreempt || isDebugCall {
967 if debugScanConservative {
968 println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
969 }
970
971
972
973
974
975
976
977
978
979 if frame.varp != 0 {
980 size := frame.varp - frame.sp
981 if size > 0 {
982 scanConservative(frame.sp, size, nil, gcw, state)
983 }
984 }
985
986
987 if frame.arglen != 0 {
988
989
990 scanConservative(frame.argp, frame.arglen, nil, gcw, state)
991 }
992
993 if isAsyncPreempt || isDebugCall {
994
995
996
997
998 state.conservative = true
999 } else {
1000
1001
1002
1003 state.conservative = false
1004 }
1005 return
1006 }
1007
1008 locals, args, objs := getStackMap(frame, &state.cache, false)
1009
1010
1011 if locals.n > 0 {
1012 size := uintptr(locals.n) * sys.PtrSize
1013 scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
1014 }
1015
1016
1017 if args.n > 0 {
1018 scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
1019 }
1020
1021
1022 if frame.varp != 0 {
1023
1024
1025
1026 for _, obj := range objs {
1027 off := obj.off
1028 base := frame.varp
1029 if off >= 0 {
1030 base = frame.argp
1031 }
1032 ptr := base + uintptr(off)
1033 if ptr < frame.sp {
1034
1035 continue
1036 }
1037 if stackTraceDebug {
1038 println("stkobj at", hex(ptr), "of type", obj.typ.string())
1039 }
1040 state.addObject(ptr, obj.typ)
1041 }
1042 }
1043 }
1044
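// gcDrainFlags control the behavior of gcDrain; see the comment on
// gcDrain below.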
1045 type gcDrainFlags int
1046
1047 const (
1048 gcDrainUntilPreempt gcDrainFlags = 1 << iota
1049 gcDrainFlushBgCredit
1050 gcDrainIdle
1051 gcDrainFractional
1052 )
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
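// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do (as reported by pollWork).
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW.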
1076 func gcDrain(gcw *gcWork, flags gcDrainFlags) {
1077 if !writeBarrier.needed {
1078 throw("gcDrain phase incorrect")
1079 }
1080
1081 gp := getg().m.curg
1082 preemptible := flags&gcDrainUntilPreempt != 0
1083 flushBgCredit := flags&gcDrainFlushBgCredit != 0
1084 idle := flags&gcDrainIdle != 0
1085
1086 initScanWork := gcw.scanWork
1087
1088
1089
1090 checkWork := int64(1<<63 - 1)
1091 var check func() bool
1092 if flags&(gcDrainIdle|gcDrainFractional) != 0 {
1093 checkWork = initScanWork + drainCheckThreshold
1094 if idle {
1095 check = pollWork
1096 } else if flags&gcDrainFractional != 0 {
1097 check = pollFractionalWorkerExit
1098 }
1099 }
1100
1101
1102 if work.markrootNext < work.markrootJobs {
1103
1104 for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
1105 job := atomic.Xadd(&work.markrootNext, +1) - 1
1106 if job >= work.markrootJobs {
1107 break
1108 }
1109 markroot(gcw, job)
1110 if check != nil && check() {
1111 goto done
1112 }
1113 }
1114 }
1115
1116
1117
1118 for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
1119
1120
1121
1122
1123
1124 if work.full == 0 {
1125 gcw.balance()
1126 }
1127
1128 b := gcw.tryGetFast()
1129 if b == 0 {
1130 b = gcw.tryGet()
1131 if b == 0 {
1132
1133
1134
1135 wbBufFlush(nil, 0)
1136 b = gcw.tryGet()
1137 }
1138 }
1139 if b == 0 {
1140
1141 break
1142 }
1143 scanobject(b, gcw)
1144
1145
1146
1147
1148 if gcw.scanWork >= gcCreditSlack {
1149 atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
1150 if flushBgCredit {
1151 gcFlushBgCredit(gcw.scanWork - initScanWork)
1152 initScanWork = 0
1153 }
1154 checkWork -= gcw.scanWork
1155 gcw.scanWork = 0
1156
1157 if checkWork <= 0 {
1158 checkWork += drainCheckThreshold
1159 if check != nil && check() {
1160 break
1161 }
1162 }
1163 }
1164 }
1165
1166 done:
1167
1168 if gcw.scanWork > 0 {
1169 atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
1170 if flushBgCredit {
1171 gcFlushBgCredit(gcw.scanWork - initScanWork)
1172 }
1173 gcw.scanWork = 0
1174 }
1175 }
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
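// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it may perform slightly more work because
// scanning is always done in whole object increments. It returns the
// amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning.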
1190 func gcDrainN(gcw *gcWork, scanWork int64) int64 {
1191 if !writeBarrier.needed {
1192 throw("gcDrainN phase incorrect")
1193 }
1194
1195
1196
1197 workFlushed := -gcw.scanWork
1198
1199 gp := getg().m.curg
1200 for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
1201
1202 if work.full == 0 {
1203 gcw.balance()
1204 }
1205
1206
1207
1208
1209
1210
1211 b := gcw.tryGetFast()
1212 if b == 0 {
1213 b = gcw.tryGet()
1214 if b == 0 {
1215
1216
1217 wbBufFlush(nil, 0)
1218 b = gcw.tryGet()
1219 }
1220 }
1221
1222 if b == 0 {
1223
1224
1225
1226
1227 if work.markrootNext < work.markrootJobs {
1228 job := atomic.Xadd(&work.markrootNext, +1) - 1
1229 if job < work.markrootJobs {
1230 markroot(gcw, job)
1231 continue
1232 }
1233 }
1234
1235 break
1236 }
1237 scanobject(b, gcw)
1238
1239
1240 if gcw.scanWork >= gcCreditSlack {
1241 atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
1242 workFlushed += gcw.scanWork
1243 gcw.scanWork = 0
1244 }
1245 }
1246
1247
1248
1249
1250
1251 return workFlushed + gcw.scanWork
1252 }
1253
1254
1255
1256
1257
1258
1259
1260
1261
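// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
// If stk != nil, possible stack pointers are also reported to
// stk.putPtr.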
1262 func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
1263
1264
1265
1266 b := b0
1267 n := n0
1268
1269 for i := uintptr(0); i < n; {
1270
1271 bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
1272 if bits == 0 {
1273 i += sys.PtrSize * 8
1274 continue
1275 }
1276 for j := 0; j < 8 && i < n; j++ {
1277 if bits&1 != 0 {
1278
1279 p := *(*uintptr)(unsafe.Pointer(b + i))
1280 if p != 0 {
1281 if obj, span, objIndex := findObject(p, b, i); obj != 0 {
1282 greyobject(obj, b, i, span, gcw, objIndex)
1283 } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
1284 stk.putPtr(p, false)
1285 }
1286 }
1287 }
1288 bits >>= 1
1289 i += sys.PtrSize
1290 }
1291 }
1292 }
1293
1294
1295
1296
1297
1298
1299
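// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.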
1300 func scanobject(b uintptr, gcw *gcWork) {
1301
1302
1303
1304
1305
1306 hbits := heapBitsForAddr(b)
1307 s := spanOfUnchecked(b)
1308 n := s.elemsize
1309 if n == 0 {
1310 throw("scanobject n == 0")
1311 }
1312
1313 if n > maxObletBytes {
1314
1315
1316 if b == s.base() {
1317
1318
1319
1320
1321
1322 if s.spanclass.noscan() {
1323
1324 gcw.bytesMarked += uint64(n)
1325 return
1326 }
1327
1328
1329
1330
1331
1332
1333 for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
1334 if !gcw.putFast(oblet) {
1335 gcw.put(oblet)
1336 }
1337 }
1338 }
1339
1340
1341
1342
1343 n = s.base() + s.elemsize - b
1344 if n > maxObletBytes {
1345 n = maxObletBytes
1346 }
1347 }
1348
1349 var i uintptr
1350 for i = 0; i < n; i += sys.PtrSize {
1351
1352 if i != 0 {
1353
1354 hbits = hbits.next()
1355 }
1356
1357 bits := hbits.bits()
1358
1359
1360
1361
1362 if i != 1*sys.PtrSize && bits&bitScan == 0 {
1363 break
1364 }
1365 if bits&bitPointer == 0 {
1366 continue
1367 }
1368
1369
1370
1371 obj := *(*uintptr)(unsafe.Pointer(b + i))
1372
1373
1374
1375 if obj != 0 && obj-b >= n {
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385 if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
1386 greyobject(obj, b, i, span, gcw, objIndex)
1387 }
1388 }
1389 }
1390 gcw.bytesMarked += uint64(n)
1391 gcw.scanWork += int64(i)
1392 }
1393
1394
1395
1396
1397
1398
1399
1400
1401
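// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.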
1402 func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
1403 if debugScanConservative {
1404 printlock()
1405 print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
1406 hexdumpWords(b, b+n, func(p uintptr) byte {
1407 if ptrmask != nil {
1408 word := (p - b) / sys.PtrSize
1409 bits := *addb(ptrmask, word/8)
1410 if (bits>>(word%8))&1 == 0 {
1411 return '$'
1412 }
1413 }
1414
1415 val := *(*uintptr)(unsafe.Pointer(p))
1416 if state != nil && state.stack.lo <= val && val < state.stack.hi {
1417 return '@'
1418 }
1419
1420 span := spanOfHeap(val)
1421 if span == nil {
1422 return ' '
1423 }
1424 idx := span.objIndex(val)
1425 if span.isFree(idx) {
1426 return ' '
1427 }
1428 return '*'
1429 })
1430 printunlock()
1431 }
1432
1433 for i := uintptr(0); i < n; i += sys.PtrSize {
1434 if ptrmask != nil {
1435 word := i / sys.PtrSize
1436 bits := *addb(ptrmask, word/8)
1437 if bits == 0 {
1438
1439
1440
1441
1442
1443
1444 if i%(sys.PtrSize*8) != 0 {
1445 throw("misaligned mask")
1446 }
1447 i += sys.PtrSize*8 - sys.PtrSize
1448 continue
1449 }
1450 if (bits>>(word%8))&1 == 0 {
1451 continue
1452 }
1453 }
1454
1455 val := *(*uintptr)(unsafe.Pointer(b + i))
1456
1457
1458 if state != nil && state.stack.lo <= val && val < state.stack.hi {
1459
1460
1461
1462
1463
1464
1465
1466
1467 state.putPtr(val, true)
1468 continue
1469 }
1470
1471
1472 span := spanOfHeap(val)
1473 if span == nil {
1474 continue
1475 }
1476
1477
1478 idx := span.objIndex(val)
1479 if span.isFree(idx) {
1480 continue
1481 }
1482
1483
1484 obj := span.base() + idx*span.elemsize
1485 greyobject(obj, b, i, span, gcw, idx)
1486 }
1487 }
1488
1489
1490
1491
1492
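// shade greys the object at b if it isn't already marked. The object
// is not nil and known to be in the heap. Preemption must be
// disabled.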
1493 func shade(b uintptr) {
1494 if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
1495 gcw := &getg().m.p.ptr().gcw
1496 greyobject(obj, 0, 0, span, gcw, objIndex)
1497 }
1498 }
1499
1500
1501
1502
1503
1504
1505
1506
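// greyobject marks the object obj (if it is not marked already) and
// enqueues it into gcw for scanning. obj is the start of an object in
// span with the given objIndex; base and off describe where the
// pointer to obj was found and are used only for debugging output.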
1507 func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
1508
1509 if obj&(sys.PtrSize-1) != 0 {
1510 throw("greyobject: obj not pointer-aligned")
1511 }
1512 mbits := span.markBitsForIndex(objIndex)
1513
1514 if useCheckmark {
1515 if !mbits.isMarked() {
1516 printlock()
1517 print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
1518 print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
1519
1520
1521 gcDumpObject("base", base, off)
1522
1523
1524 gcDumpObject("obj", obj, ^uintptr(0))
1525
1526 getg().m.traceback = 2
1527 throw("checkmark found unmarked object")
1528 }
1529 hbits := heapBitsForAddr(obj)
1530 if hbits.isCheckmarked(span.elemsize) {
1531 return
1532 }
1533 hbits.setCheckmarked(span.elemsize)
1534 if !hbits.isCheckmarked(span.elemsize) {
1535 throw("setCheckmarked and isCheckmarked disagree")
1536 }
1537 } else {
1538 if debug.gccheckmark > 0 && span.isFree(objIndex) {
1539 print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
1540 gcDumpObject("base", base, off)
1541 gcDumpObject("obj", obj, ^uintptr(0))
1542 getg().m.traceback = 2
1543 throw("marking free object")
1544 }
1545
1546
1547 if mbits.isMarked() {
1548 return
1549 }
1550 mbits.setMarked()
1551
1552
1553 arena, pageIdx, pageMask := pageIndexOf(span.base())
1554 if arena.pageMarks[pageIdx]&pageMask == 0 {
1555 atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
1556 }
1557
1558
1559
1560 if span.spanclass.noscan() {
1561 gcw.bytesMarked += uint64(span.elemsize)
1562 return
1563 }
1564 }
1565
1566
1567
1568
1569
1570
1571
1572 if !gcw.putFast(obj) {
1573 gcw.put(obj)
1574 }
1575 }
1576
1577
1578
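// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.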
1579 func gcDumpObject(label string, obj, off uintptr) {
1580 s := spanOf(obj)
1581 print(label, "=", hex(obj))
1582 if s == nil {
1583 print(" s=nil\n")
1584 return
1585 }
1586 print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
1587 if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
1588 print(mSpanStateNames[state], "\n")
1589 } else {
1590 print("unknown(", state, ")\n")
1591 }
1592
1593 skipped := false
1594 size := s.elemsize
1595 if s.state.get() == mSpanManual && size == 0 {
1596
1597
1598
1599 size = off + sys.PtrSize
1600 }
1601 for i := uintptr(0); i < size; i += sys.PtrSize {
1602
1603
1604
1605 if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
1606 skipped = true
1607 continue
1608 }
1609 if skipped {
1610 print(" ...\n")
1611 skipped = false
1612 }
1613 print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
1614 if i == off {
1615 print(" <==")
1616 }
1617 print("\n")
1618 }
1619 if skipped {
1620 print(" ...\n")
1621 }
1622 }
1623
1624
1625
1626
1627
1628
1629
1630
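// gcmarknewobject marks a newly allocated object black. obj must not
// contain any non-nil pointers. It runs without preemption so it can
// safely use the per-P gcWork.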
1631 func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
1632 if useCheckmark {
1633 throw("gcmarknewobject called while doing checkmark")
1634 }
1635
1636
1637 objIndex := span.objIndex(obj)
1638 span.markBitsForIndex(objIndex).setMarked()
1639
1640
1641 arena, pageIdx, pageMask := pageIndexOf(span.base())
1642 if arena.pageMarks[pageIdx]&pageMask == 0 {
1643 atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
1644 }
1645
1646 gcw := &getg().m.p.ptr().gcw
1647 gcw.bytesMarked += uint64(size)
1648 gcw.scanWork += int64(scanSize)
1649 }
1650
1651
1652
1653
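// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.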
1654 func gcMarkTinyAllocs() {
1655 for _, p := range allp {
1656 c := p.mcache
1657 if c == nil || c.tiny == 0 {
1658 continue
1659 }
1660 _, span, objIndex := findObject(c.tiny, 0, 0)
1661 gcw := &p.gcw
1662 greyobject(c.tiny, 0, 0, span, gcw, objIndex)
1663 }
1664 }
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
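// Checkmarks
//
// useCheckmark indicates that the GC is running a second,
// stop-the-world verification pass (enabled by debug.gccheckmark) in
// which greyobject checks that every reachable object was already
// marked by the concurrent mark phase.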
1687 var useCheckmark = false
1688
1689
1690 func initCheckmarks() {
1691 useCheckmark = true
1692 for _, s := range mheap_.allspans {
1693 if s.state.get() == mSpanInUse {
1694 heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
1695 }
1696 }
1697 }
1698
1699 func clearCheckmarks() {
1700 useCheckmark = false
1701 for _, s := range mheap_.allspans {
1702 if s.state.get() == mSpanInUse {
1703 heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
1704 }
1705 }
1706 }
1707