Source file src/runtime/mgcsweep.go
// Garbage collector: sweeping

// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
//   can free a whole span if none of the objects are marked, but that
//   isn't its goal. This can be driven either synchronously by
//   mcentral.cacheSpan for mcentral spans, or asynchronously by
//   sweepone, which looks at all the mcentral lists.
//
// * The span reclaimer looks for spans that contain no marked objects
//   and frees whole spans. This is a separate algorithm because
//   freeing whole spans is the hardest task for the object reclaimer,
//   but is critical when allocating new spans. The entry point for
//   this is mheap_.reclaim.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.
package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// sweepdata holds the state of the background sweeper.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32

	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass
}

// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)

func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

// update raises *s to sNew if sNew is larger; *s only ever increases
// until it is cleared at mark termination.
func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
}

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}
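
// For illustration: a sweepClass interleaves each span class's full and
// partial unswept sets. Even values select the full-unswept set and odd
// values the partial-unswept set of spanClass(s >> 1), so the sweeper
// visits, in order: (spanClass 0, full), (spanClass 0, partial),
// (spanClass 1, full), (spanClass 1, partial), and so on. For example,
// sweepClass(5).split() yields (spanClass(2), full=false).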

// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// Reset all the unswept buffers, which should be empty.
	// Do this in sweep termination as opposed to mark termination
	// so that we can catch unswept spans and reclaim blocks as
	// soon as possible.
	sg := mheap_.sweepgen
	for i := range mheap_.central {
		c := &mheap_.central[i].mcentral
		c.partialUnswept(sg).reset()
		c.fullUnswept(sg).reset()
	}

	// Sweeping is done, so if the scavenger isn't already awake,
	// wake it up. There's definitely work for it to do at this
	// point.
	wakeScavenger()

	nextMarkBitArenaEpoch()
}

// bgsweep is the entry point of the background sweeper goroutine. It
// sweeps spans and frees workbufs incrementally between GCs, yielding
// to the scheduler after each unit of work, and parks when there is
// nothing left to sweep.
func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}
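
// A span's sweepgen, relative to the heap's sweepgen (sg below), encodes
// its sweep state (see also the comment on mspan in mheap.go):
//
//	s.sweepgen == sg - 2: the span needs sweeping
//	s.sweepgen == sg - 1: the span is currently being swept
//	s.sweepgen == sg:     the span is swept and ready to use
//	s.sweepgen == sg + 1: the span was cached before sweep began and
//	                      still needs sweeping
//	s.sweepgen == sg + 3: the span was swept and then cached
//
// mheap_.sweepgen is incremented by 2 after every GC. That is why sweepone
// below claims a span by CASing its sweepgen from sg-2 to sg-1, and why a
// span found in an unswept list with sweepgen sg or sg+3 has already been
// swept by someone else.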

// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	_g_ := getg()
	sweepRatio := mheap_.sweepPagesPerByte // For debugging

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, thus leaving the span in an inconsistent state for next GC.
	_g_.m.locks++
	if atomic.Load(&mheap_.sweepdone) != 0 {
		_g_.m.locks--
		return ^uintptr(0)
	}
	atomic.Xadd(&mheap_.sweepers, +1)

	// Find a span to sweep.
	var s *mspan
	sg := mheap_.sweepgen
	for {
		s = mheap_.nextSpanForSweep()
		if s == nil {
			atomic.Store(&mheap_.sweepdone, 1)
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sg || s.sweepgen == sg+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			break
		}
	}

	// Sweep the span we found.
	npages := ^uintptr(0)
	if s != nil {
		npages = s.npages
		if s.sweep(false) {
			// Whole span was freed. Count it toward the
			// page reclaimer credit since these pages can
			// now be used for span allocation.
			atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
		} else {
			// Span is still in-use, so this returned no
			// pages to the heap and the span needs to
			// move to the swept in-use list.
			npages = 0
		}
	}

	// Decrement the number of active sweepers and if this is the
	// last one, print trace information.
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		// Since the sweeper is done, move the scavenge gen forward (signaling
		// that there's new work to do) and wake the scavenger.
		//
		// The scavenger is signaled by the last sweeper because once
		// sweeping is done, we will definitely have useful work for
		// the scavenger to do, since the scavenger only runs over the
		// heap once per GC cycle. This update is not done during sweep
		// termination because in some cases there may be a long delay
		// between sweep done and sweep termination (e.g. not enough
		// allocations to trigger a GC) which would be nice to fill in
		// with scavenging work.
		systemstack(func() {
			lock(&mheap_.lock)
			mheap_.pages.scavengeStartGen()
			unlock(&mheap_.lock)
		})
		// Since we might sweep in an allocation path, it's not possible
		// for us to wake the scavenger directly via wakeScavenger, since
		// it could allocate. Ask sysmon to do it for us instead.
		readyForScavenger()

		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		}
	}
	_g_.m.locks--
	return npages
}

// isSweepDone reports whether all spans are swept or currently being swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	return mheap_.sweepdone != 0
}

// ensureSwept ensures that the span is swept before returning.
// It sweeps the span itself if it is still pending, or else waits
// for the concurrent sweeper to finish with it.
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	spangen := atomic.Load(&s.sweepgen)
	if spangen == sg || spangen == sg+3 {
		return
	}
	// Claim the span for sweeping ourselves if it still needs sweeping.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Someone else is sweeping the span concurrently; we have no
	// efficient way to wait, so spin until they finish.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sg || spangen == sg+3 {
			break
		}
		osyield()
	}
}

// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (s *mspan) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both a finalizer and a profile record.
	//    In such case we need to queue the finalizer for execution,
	//    mark the object as live and preserve the profile record.
	// 2. A tiny object can have several finalizers set up for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has a finalizer
					// (so it is kept alive). Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead anyway.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects: objects that are marked but free. A zombie
	// means the GC found a pointer to an object that was already freed,
	// typically through bad use of unsafe.Pointer (see reportZombies below).
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence
		// cannot be zombies.
		//
		// Check the first bitmap byte, where we have to be
		// careful with freeindex.
		obj := s.freeindex
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check the remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// most cases.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation is ready
	// to go, so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated into,
			// wasn't totally filled, but then swept, still has all of its
			// free slots zeroed.
			s.needzero = 1
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed))
			memstats.heapStats.release()
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set it's on, but has taken ownership of the span for
			// sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if uintptr(nalloc) == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.
			//
			// Under debug.efence the span's memory is faulted rather than
			// returned to the heap, so that any later access to the freed
			// object crashes immediately instead of silently reusing the
			// memory.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.largeFreeCount, 1)
			atomic.Xadduintptr(&stats.largeFree, size)
			memstats.heapStats.release()
			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}

// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC two cycles ago missed a pointer and freed a live object,
// but it was still live in the last cycle, so this GC cycle found a
// pointer to that object and marked it.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < s.nelems; i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < s.freeindex || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, addr+length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}
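
// For illustration only, a hypothetical way to trigger case 1 above
// (do not write code like this): the GC does not treat a bare uintptr
// as a reference, so the object may be freed while only the uintptr
// remains, and a later GC that marks the slot through the reconstructed
// pointer will trip the zombie check above.
//
//	p := new([16]byte)
//	u := uintptr(unsafe.Pointer(p)) // uintptr is not a reference
//	p = nil
//	runtime.GC()                          // object may be swept here
//	q := (*[16]byte)(unsafe.Pointer(u))   // invalid: points to a free slot
//	_ = q                                 // next GC may mark the free slot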

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going in to debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}
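
// For illustration (numbers are hypothetical, not taken from the runtime):
// with mheap_.sweepPagesPerByte = 0.001 pages/byte, heap_live 1 MiB above
// mheap_.sweepHeapLiveBasis, and a caller about to allocate an 8 KiB span
// with no callerSweepPages credit, deductSweepCredit requires roughly
//
//	0.001 * (1<<20 + 8<<10) ≈ 1057 pages
//
// to have been swept since pagesSweptBasis before the allocation proceeds,
// calling sweepone until that debt is paid off.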

// clobberfree sets the memory content at x to bad content, for debugging
// purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}