Source file: src/runtime/mgcsweep.go
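// Garbage collector: sweeping.
//
// This file implements the sweeper: the background sweeper goroutine
// (bgsweep), on-demand and proportional sweeping by allocating
// goroutines (sweepone, deductSweepCredit), and the per-span sweep
// routines (mspan.sweep and mspan.oldSweep) that free unmarked objects
// and run finalizer specials.
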
package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata
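
// State of background sweep.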
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32

	// centralIndex is the current unswept span class. It is an index
	// into the mcentral span sets and is accessed and updated only via
	// its atomic load and update methods; it is not protected by a lock.
	centralIndex sweepClass
}

// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)

func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
}

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}
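
// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.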
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}
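
// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures that there are no sweeps in
// progress.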
func finishsweep_m() {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	if go115NewMCentralImpl {
		// Reset all the unswept buffers, which should be empty.
		// Do this in sweep termination as opposed to mark termination
		// so that we can catch unswept spans and reclaim blocks as
		// soon as possible.
		sg := mheap_.sweepgen
		for i := range mheap_.central {
			c := &mheap_.central[i].mcentral
			c.partialUnswept(sg).reset()
			c.fullUnswept(sg).reset()
		}
	}

	// Sweeping is done, so if the scavenger isn't already awake,
	// wake it up. There's definitely work for it to do at this
	// point.
	wakeScavenger()

	nextMarkBitArenaEpoch()
}
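
// bgsweep is the entry point of the background sweeper goroutine. It
// repeatedly sweeps spans and frees workbufs until sweeping is done,
// then parks until the next GC cycle wakes it.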
func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}
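
// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.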
func sweepone() uintptr {
	_g_ := getg()
	sweepRatio := mheap_.sweepPagesPerByte // For debugging

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, thus leaving the span in an inconsistent
	// state for the next GC.
	_g_.m.locks++
	if atomic.Load(&mheap_.sweepdone) != 0 {
		_g_.m.locks--
		return ^uintptr(0)
	}
	atomic.Xadd(&mheap_.sweepers, +1)

	// Find a span to sweep.
	var s *mspan
	sg := mheap_.sweepgen
	for {
		if go115NewMCentralImpl {
			s = mheap_.nextSpanForSweep()
		} else {
			s = mheap_.sweepSpans[1-sg/2%2].pop()
		}
		if s == nil {
			atomic.Store(&mheap_.sweepdone, 1)
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sg || s.sweepgen == sg+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			break
		}
	}

	// Sweep the span we found.
	npages := ^uintptr(0)
	if s != nil {
		npages = s.npages
		if s.sweep(false) {
			// Whole span was freed. Count it toward the
			// page reclaimer credit since these pages can
			// now be used for span allocation.
			atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
		} else {
			// Span is still in-use, so this returned no
			// pages to the heap and the span needs to
			// move to the swept in-use list.
			npages = 0
		}
	}

	// Decrement the number of active sweepers and if this is the
	// last one, print trace information.
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		// Since sweeping is done, move the scavenger's generation
		// forward (signalling that there is new work to do) and wake
		// it if necessary. The last sweeper does this because once
		// sweeping is done there is definitely useful work for the
		// scavenger, which only runs over the heap once per GC cycle.
		systemstack(func() {
			lock(&mheap_.lock)
			mheap_.pages.scavengeStartGen()
			unlock(&mheap_.lock)
		})
		// Sweeping may happen on an allocation path, so the scavenger
		// can't be woken directly here; ask sysmon to wake it instead.
		readyForScavenger()

		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		}
	}
	_g_.m.locks--
	return npages
}
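
// isSweepDone reports whether all spans are swept or currently being swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.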
func isSweepDone() bool {
	return mheap_.sweepdone != 0
}
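
// ensureSwept ensures that span s has been swept: it sweeps the span
// itself if it can claim it, and otherwise waits for the concurrent
// sweep of the span to complete.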
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	spangen := atomic.Load(&s.sweepgen)
	if spangen == sg || spangen == sg+3 {
		return
	}
	// The caller must be sure that the span is a mSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Someone else is sweeping this span; wait for them to finish.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sg || spangen == sg+3 {
			break
		}
		osyield()
	}
}
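
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.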
func (s *mspan) sweep(preserve bool) bool {
	if !go115NewMCentralImpl {
		return s.oldSweep(preserve)
	}
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	c := _g_.m.p.ptr().mcache

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then. If the allocBits index is
	// >= s.freeindex and the bit is not marked then the object remains
	// unallocated since the last GC, analogous to being on a free list.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In that case we need to queue the finalizer for execution,
	//    mark the object as live, and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different
	//    offsets. If such an object is not marked, we need to queue all
	//    finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has a
					// finalizer (so it is kept alive). Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects: mark bits set on objects that are not
	// allocated, which indicates a bad pointer into a free slot.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence cannot be
		// zombies.
		//
		// Check the first bitmap byte, where we have to be careful
		// with freeindex.
		obj := s.freeindex
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check the remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// more detail.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for the next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are
	// swept, because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for
	// allocation (return it to the heap or an mcentral), because
	// allocation code assumes that a span is already swept if it is
	// available for allocation.
	//
	// Serialization point: at this point the mark bits are cleared and
	// allocation is ready to go, so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated
			// into, wasn't totally filled, but then swept, still has
			// all of its free slots zeroed.
			s.needzero = 1
			c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set it's on, but has taken ownership of the span
			// for sweeping by updating sweepgen. If this span still is
			// in an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if uintptr(nalloc) == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.
			//
			// Under debug.efence, fault the span's memory instead of
			// returning it to the heap, so that any use after free
			// traps immediately rather than silently reusing the memory.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}
			c.local_nlargefree++
			c.local_largefree += size
			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}
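
// oldSweep is the sweep implementation used when go115NewMCentralImpl
// is false. Like sweep, it frees or collects finalizers for blocks not
// marked in the mark phase, clears the mark bits for the next GC round,
// and returns true if the span was returned to the heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.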
func (s *mspan) oldSweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize
	res := false

	c := _g_.m.p.ptr().mcache
	freeToHeap := false

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then. If the allocBits index is
	// >= s.freeindex and the bit is not marked then the object remains
	// unallocated since the last GC, analogous to being on a free list.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In that case we need to queue the finalizer for execution,
	//    mark the object as live, and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different
	//    offsets. If such an object is not marked, we need to queue all
	//    finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has a
					// finalizer (so it is kept alive). Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}
	if go115NewMarkrootSpans && hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	if spc.sizeclass() == 0 && nalloc == 0 {
		s.needzero = 1
		freeToHeap = true
	}
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	wasempty := s.nextFreeIndex() == s.nelems
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for the next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// We need to set s.sweepgen = h.sweepgen only when all blocks are
	// swept, because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for
	// allocation (return it to heap or mcentral), because allocation
	// code assumes that a span is already swept if available for allocation.
	if freeToHeap || nfreed == 0 {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("mspan.sweep: bad span state after sweep")
		}
		// Serialization point.
		// At this point the mark bits are cleared and allocation ready
		// to go so release the span.
		atomic.Store(&s.sweepgen, sweepgen)
	}

	if nfreed > 0 && spc.sizeclass() != 0 {
		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
		// mcentral.freeSpan updates sweepgen.
	} else if freeToHeap {
		// Free large object span to heap.
		//
		// Under debug.efence, fault the span's memory instead of
		// returning it to the heap, so that any use after free traps
		// immediately rather than silently reusing the memory.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(s.base()), size)
		} else {
			mheap_.freeSpan(s)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if !res {
		// The span has been swept and is still in-use, so put
		// it on the swept in-use list.
		mheap_.sweepSpans[sweepgen/2%2].push(s)
	}
	return res
}
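
// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC two cycles ago missed a pointer and freed a live object,
// but it was still live in the last cycle, so this GC cycle found a
// pointer to that object and marked it.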
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < s.nelems; i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < s.freeindex || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, addr+length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}
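
// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going in to debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.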
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}
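
// clobberfree sets the memory content at x to bad content, for debugging
// purposes.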
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}
887