Source file
src/runtime/mbitmap.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by 1-bit bitmaps in which 0 means uninteresting and 1 means
// live pointer to be visited during GC. The bits in each byte are
// consumed starting with the low bit: 1<<0, 1<<1, and so on.
//
// Heap bitmap
//
// The heap bitmap comprises 2 bits for each pointer-sized word in the
// heap, stored in the heapArena metadata backing each heap arena. Each
// bitmap byte describes four consecutive heap words: the pointer bits
// for the four words occupy the low nibble of the byte and the scan
// bits occupy the high nibble, so word i within a byte uses bits
// bitPointer<<i and bitScan<<i.
//
// The low (pointer) bit of each 2-bit entry means pointer (1) or
// scalar (0), as in the 1-bit bitmaps. The meaning of the high (scan)
// bit depends on the position of the word being described: in every
// word except the second word of an object, a set scan bit means the
// object is still being described, while a clear scan bit means the
// remaining words of the object contain no pointers (the "dead"
// encoding). In the second word, the high bit is instead the GC
// "checkmarked" bit used by the checkmark debugging mode.
package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)

// addb returns the byte pointer p+n.
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nowritebarrier
//go:nosplit
func subtract1(p *byte) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte of the arena's bitmap
}

// Make the compiler check that heapBits.arena is large enough to hold
// the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
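
// Because allocCache holds the complement of the alloc bits, a set bit
// in the cache marks a free slot, and counting trailing zeros finds the
// next free object. For example, if the first alloc byte is 0b00000111
// (objects 0-2 allocated), the cache ends in ...11111000 and Ctz64
// reports 3, the index of the first free object.
// (Illustrative example, not from the source.)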

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// nothing was available in the cached bits;
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < s.freeindex {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

func (s *mspan) objIndex(p uintptr) uintptr {
	byteOffset := p - s.base()
	if byteOffset == 0 {
		return 0
	}
	if s.baseMask != 0 {
		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
		return byteOffset >> s.divShift
	}
	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
}
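
// The general case divides byteOffset by s.elemsize using a precomputed
// magic multiplier (s.divMul) and shifts, avoiding a hardware divide.
// For a power-of-two elemsize the baseMask path applies: with
// elemsize = 16, divShift is 4, so a byte offset of 48 yields object
// index 3. (Illustrative example, not from the source.)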

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure addr is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
	// 2 bits per word, 4 pairs per byte
	arena := arenaIndex(addr)
	ha := mheap_.arenas[arena.l1()][arena.l2()]
	// The compiler uses a load for nil checking ha, but in this
	// case we'll almost never hit that cache line again, so it
	// makes more sense to do a value check.
	if ha == nil {
		// addr is not in the heap. Return nil heapBits, which
		// we expect to crash in the caller.
		return
	}
	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
	h.shift = uint32((addr / sys.PtrSize) & 3)
	h.arena = uint32(arena)
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return
}
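
// As a concrete reading of the address arithmetic on a 64-bit system:
// each bitmap byte covers 4*8 = 32 bytes of heap, so addr/32 (modulo
// the arena bitmap size) selects the byte, and (addr/8)&3 selects which
// of the four 2-bit entries within it describes addr's word. An address
// ending in 0x18 therefore lands on shift 3, the byte's last entry.
// (Illustrative example, not from the source.)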

// badPointer throws a "found bad pointer" error, after printing as much
// diagnostic context as it safely can.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug, or that external memory is not correctly
	// initialized or mapped.
	//
	// refBase and refOff give the object (if any) in
	// which p was found, for the error report.
	printlock()
	print("runtime: pointer ", hex(p))
	state := s.state.get()
	if state != mSpanInUse {
		print(" to unallocated span")
	} else {
		print(" to unused region of span")
	}
	print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state, "\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p points into an invalid heap area and debug.invalidptr is set,
// findObject throws instead of returning 0.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the relative offset at which p
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current
// goroutine's stack.
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If this span holds objects of a power of 2 size, just mask off the
	// bits to the object size.
	if s.baseMask != 0 {
		base = s.base()
		base = base + (p-base)&uintptr(s.baseMask)
		objIndex = (base - s.base()) >> s.divShift
		// base = p & s.baseMask is faster for small spans,
		// but doesn't work for large spans.
		// Overall, it's faster to use the more general computation above.
	} else {
		base = s.base()
		if p-base >= s.elemsize {
			// n := (p - base) / s.elemsize, using division by multiplication
			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
			base += objIndex * s.elemsize
		}
	}
	return
}
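
// For instance, given a span of 48-byte objects with base B, a pointer
// p = B+100 falls in the third object: the magic-number division yields
// objIndex = 100/48 = 2, so findObject returns base = B+96.
// (Illustrative example, not from the source.)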

// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		h.shift += heapBitsShift
	} else if h.bitp != h.last {
		h.bitp, h.shift = add1(h.bitp), 0
	} else {
		// Move to the next arena.
		return h.nextArena()
	}
	return h
}

// nextArena advances h to the beginning of the next heap arena.
//
// This is a slow-path helper to next. gc's inliner knows that
// heapBits.next can be inlined even though it calls this. This is
// marked noinline so it doesn't get inlined into next and cause next
// to be too big to inline.
//
//go:nosplit
//go:noinline
func (h heapBits) nextArena() heapBits {
	h.arena++
	ai := arenaIdx(h.arena)
	l2 := mheap_.arenas[ai.l1()]
	if l2 == nil {
		// We just passed the end of the object, which
		// was also the end of the heap. Poison h. It
		// should never be dereferenced at this point.
		return heapBits{}
	}
	ha := l2[ai.l2()]
	if ha == nil {
		return heapBits{}
	}
	h.bitp, h.shift = &ha.bitmap[0], 0
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return h
}

// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
//go:nosplit
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
	h.shift = uint32(n%4) * heapBitsShift
	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
		return h
	}

	// We're in a new heap arena.
	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
	ai := arenaIdx(h.arena)
	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		a := l2[ai.l2()]
		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
		h.last = &a.bitmap[len(a.bitmap)-1]
	} else {
		h.bitp, h.last = nil, nil
	}
	return h
}
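
// To see the arithmetic: starting at shift 2 (the third entry of a
// byte), h.forward(9) computes n = 9+2 = 11, advances the byte pointer
// by 11/4 = 2 bytes, and leaves the shift at 11%4 = 3, i.e. two bytes
// later, last entry. (Illustrative example, not from the source.)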

// forwardOrBoundary is like forward, but stops at boundaries between
// contiguous sections of the bitmap. It returns the number of words
// advanced over, which will be <= n.
func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
	if n > maxn {
		n = maxn
	}
	return h.forward(n), n
}

// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}

// morePointers reports whether the remaining words of the current
// object may contain pointers, i.e. whether scanning should continue.
// h must not describe the second word of the object, where the scan
// bit instead holds the checkmark.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}

// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}

// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) isCheckmarked(size uintptr) bool {
	if size == sys.PtrSize {
		return (*h.bitp>>h.shift)&bitPointer != 0
	}
	// All multiword objects are 2-word aligned,
	// so we know that the initial word's 2-bit pair
	// and the second word's 2-bit pair are in the
	// same heap bitmap byte, *h.bitp.
	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
}

// setCheckmarked sets the checkmarked bit.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
	if size == sys.PtrSize {
		atomic.Or8(h.bitp, bitPointer<<h.shift)
		return
	}
	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 as the src for each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data and BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no new pointers are
		// exposed, and there's no need for barriers.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	}
}
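
// A typical caller (typedmemmove in mbarrier.go) pairs the barrier with
// the copy it protects, along the lines of:
//
//	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	memmove(dst, src, typ.size)
//
// so that the GC observes both the old and new pointer values before
// they are overwritten. (Sketch of the call pattern, not a quote.)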

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if h.isPointer() {
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(0, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
		h = h.next()
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / sys.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * sys.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove, using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and
// [dst, dst+size). dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.size != size {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.kind&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.gcdata
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
		if i&(sys.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(*dstx, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
	}
}

// initSpan initializes the heap bitmap for a span.
// If this is a span of single pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	// Clear bits corresponding to objects.
	nw := (s.npages << _PageShift) / sys.PtrSize
	if nw%wordsPerBitmapByte != 0 {
		throw("initSpan: unaligned length")
	}
	if h.shift != 0 {
		throw("initSpan: unaligned base")
	}
	isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
	for nw > 0 {
		hNext, anw := h.forwardOrBoundary(nw)
		nbyte := anw / wordsPerBitmapByte
		if isPtrs {
			bitp := h.bitp
			for i := uintptr(0); i < nbyte; i++ {
				*bitp = bitPointerAll | bitScanAll
				bitp = add1(bitp)
			}
		} else {
			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
		}
		h = hNext
		nw -= anw
	}
}
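
// Note that bitPointerAll|bitScanAll is 0x0f|0xf0 = 0xff, so on 64-bit
// systems a span of one-word objects gets its bitmap filled with 0xff
// bytes: every word is a pointer and every word is marked scannable.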

// initCheckmarkSpan initializes a span for being checkmarked.
// It clears the checkmark bits, which are set to 1 in normal operation.
func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit system, since minimum size is 8.
		// Must clear type bit (checkmark bit) of every word.
		// The type bit is the lower of every two-bit pair.
		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
			*h.bitp &^= bitPointerAll
			h = h.forward(wordsPerBitmapByte)
		}
		return
	}
	for i := uintptr(0); i < n; i++ {
		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
		h = h.forward(size / sys.PtrSize)
	}
}

// clearCheckmarkSpan undoes all the checkmarking in a span.
// The actual checkmark bits are ignored, so the only work to do
// is to fix the pointer bits. (Pointer bits are ignored by scanobject
// but consulted by typedmemmove.)
func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit system, since minimum size is 8.
		// Must restore the type bit (pointer bit) of every word.
		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
			*h.bitp |= bitPointerAll
			h = h.forward(wordsPerBitmapByte)
		}
	}
}

// countAlloc returns the number of objects allocated in span s by
// scanning the allocation bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(s.nelems, 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases, irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get a OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}

// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	// dataSize is the amount of memory that actually holds values of
	// typ; size is the full size-class-rounded allocation, so
	// dataSize <= size and the tail is recorded as scalar.

	if sys.PtrSize == 8 && size == sys.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}

	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// 2-word objects only have 4 bitmap bits and are always described
	// by a single bitmap byte. They are handled specially here so that
	// the general unrolling code below can assume it is setting at
	// least two bitmap bytes.
	if size == 2*sys.PtrSize {
		if typ.size == sys.PtrSize {
			// We're allocating a block big enough to hold two words.
			// Not all such blocks hold two pointers: on 32-bit
			// systems a one-word value may occupy only the first word.
			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element slice of pointer.
				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*sys.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := (b & 3) | bitScan
		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
		// 110011 is shifted h.shift and complemented.
		// This clears out the bits that are about to be
		// ored into *h.bitp in the next instructions.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	}

	// Copying from 1-bit ptrmask into 2-bit bitmap.
	// The basic approach is to use a single uintptr as a bit buffer,
	// alternating between reloading the buffer and writing bitmap bytes.
	// In general, one load can supply two bitmap byte writes.
	// This is a lot of lines of code, but it compiles into relatively few
	// machine instructions.

	outOfPlace := false
	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
		// This object spans heap arenas, so the bitmap may be
		// discontiguous. Unroll it into the object instead
		// and then copy it out.
		//
		// In doubleCheck mode, we randomly do this anyway to
		// stress test the bitmap copying path.
		outOfPlace = true
		h.bitp = (*uint8)(unsafe.Pointer(x))
		h.last = nil
	}

	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp

	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-check mechanism as the
	// 1-bit case. Nothing above could have generated anything
	// that needs checking.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
		}
		goto Phase4
	}

	// Note about sizes:
	//
	// typ.size is the number of bytes in the object, and typ.ptrdata
	// is the number of bytes in the prefix of the object that contains
	// pointers. That is, the final typ.size - typ.ptrdata bytes
	// contain no pointers. This allows optimization of a common
	// pattern where an object has a small header of pointers followed
	// by a large scalar section.
	//
	// The loop below writes the bitmap one byte (four heap words) at a
	// time, maintaining the invariant that b holds the next nb bits of
	// the 1-bit pointer mask. When dataSize > typ.size, the object is
	// an array of typ and the mask must be replicated: if the whole
	// mask fits in a uintptr it is preloaded into pbits and replayed
	// from a register (p == nil below); otherwise p walks the mask and
	// wraps from endp back to ptrmask.
	//
	// The code is organized in phases:
	//
	// Phase 1 handles the first bitmap byte, which may be shared with
	// a neighboring object and so must be written carefully.
	// Phase 2 writes all the full bitmap bytes up to, but not
	// including, the last byte of the object.
	// Phase 3 writes the last (possibly partial) byte and zeroes the
	// bitmap for the scalar tail beyond typ.ptrdata.
	// Phase 4 copies an out-of-place unrolled bitmap into the real
	// heap bitmap (see outOfPlace above) and runs the doubleCheck
	// verification when enabled.

	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes typ.ptrdata bytes, not typ.size bytes.
		const maxBits = sys.PtrSize*8 - 7
		if typ.ptrdata/sys.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read the bitmap again.

			// Accumulate ptrmask into b.
			// ptrmask is sized to describe only typ.ptrdata, but we record
			// it as describing typ.size bytes, since all the high bits are zero.
			nb = typ.ptrdata / sys.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / sys.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling is fewer steps than iterating by nb each time.
			// (nb could be 1.) Since we loaded typ.ptrdata/sys.PtrSize
			// bits but are pretending to have typ.size/sys.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= sys.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is more efficient than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/sys.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}

	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / sys.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
		return
	}
	if nw < 2 {
		// Must write at least 2 words, because the "no scan"
		// encoding doesn't take effect until the third word.
		nw = 2
	}

	// Phase 1: Special case of first byte in object.
	// Set first byte of object bitmap. Because the first byte may be
	// shared with the bitmap of a neighboring object, it cannot simply
	// be overwritten.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		// Handle first byte of bitmap specially.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object.
		// In all following words, we set the scan/dead
		// appropriately to indicate that the object continues
		// to the next 2-bit entry in the bitmap.
		//
		// We set four bits at a time here, but if the object
		// is fewer than four words, phase 3 will clear
		// unnecessary bits.
		hb = b & bitPointerAll
		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
		nb -= 4

	case sys.PtrSize == 8 && h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		//
		// We took care of 1-word, 2-word, and 3-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		// This is not noscan, so set the scan bit in the
		// first word.
		hb |= bitScan << (2 * heapBitsShift)
		b >>= 2
		nb -= 2
		// Note: no bitScan for second word because that's
		// the checkmark.
		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = add1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled
			// 2-word objects above.
			// This must be at least a 6-word object. If we're out of
			// pointer words, mark no scan in next bitmap byte and finish.
			hb = 0
			w += 4
			goto Phase3
		}
	}

	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not write them out,
	// so that at the end Phase 3 can decide how much of the last byte
	// belongs to this object and avoid touching bits that belong to the
	// next one.
	//
	// The loop is unrolled 2x: one load of the ptrmask can feed two
	// bitmap byte writes. Adjust nb to count the bits remaining after
	// the write at the top of the loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b may have only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// b already has enough bits buffered;
				// consume from the buffer instead of loading.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
	}

Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet flushed to hbitp,
		// we have written too many bits. Discard the ones we don't want.
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / sys.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = add1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write first two entries in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}

Phase4:
	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
	if outOfPlace {
		// TODO: We could probably make this faster by
		// handling [x+dataSize, x+size) specially.
		h := heapBitsForAddr(x)

		// cnw is the number of heap words, or bit pairs,
		// remaining (like nw above).
		cnw := size / sys.PtrSize
		src := (*uint8)(unsafe.Pointer(x))
		// It's possible for small objects to span arenas, so the
		// bitmap may share bytes with neighboring objects.
		//
		// Handle the first byte specially if it's shared. See
		// Phase 1 for why this is the only special case we need.
		if doubleCheck {
			if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
				print("x=", x, " size=", size, " cnw=", h.shift, "\n")
				throw("bad start shift")
			}
		}
		if sys.PtrSize == 8 && h.shift == 2 {
			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
			h = h.next().next()
			cnw -= 2
			src = addb(src, 1)
		}
		// We're now byte aligned. Copy out to per-arena
		// bitmaps until the last byte (which may again be
		// partial).
		for cnw >= 4 {
			// This loop processes four words at a time,
			// so round cnw down accordingly.
			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)

			// n is the number of bitmap bytes to copy.
			n := words / 4
			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
			cnw -= words
			h = hNext
			src = addb(src, n)
		}
		if doubleCheck && h.shift != 0 {
			print("cnw=", cnw, " h.shift=", h.shift, "\n")
			throw("bad shift after block copy")
		}
		// Handle the last byte if it's shared.
		if cnw == 2 {
			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
			src = addb(src, 1)
			h = h.next().next()
		}
		if doubleCheck {
			if uintptr(unsafe.Pointer(src)) > x+size {
				throw("copy exceeded object size")
			}
			if !(cnw == 0 || cnw == 2) {
				print("x=", x, " size=", size, " cnw=", cnw, "\n")
				throw("bad number of remaining words")
			}
			// Set up hbitp so doubleCheck code below can check it.
			hbitp = h.bitp
		}
		// Zero the object where we wrote the bitmap.
		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
	}

	// Double check the whole bitmap.
	if doubleCheck {
		// x+size may not point to the heap, so back up one
		// word and then advance it the way we do above.
		end := heapBitsForAddr(x + size - sys.PtrSize).next()
		endAI := arenaIdx(end.arena)
		if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
			// The unrolling code above walks hbitp just
			// past the bitmap without moving to the next
			// arena. Synthesize this for end.bitp.
			end.arena--
			endAI = arenaIdx(end.arena)
			end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
			end.last = nil
		}
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}

		// Double-check that bits to be written were written correctly.
		// Does not check that other bits were not also modified.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / sys.PtrSize
		ndata := typ.size / sys.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
		for i := uintptr(0); i < size/sys.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				want = 0 // dead marker
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				if i != 1 {
					want |= bitScan
				} else {
					have &^= bitScan // checkmark bit; not currently checked
				}
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}
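
// As a worked example of the fast path above (assume a 64-bit system):
// allocating a value of type struct{ p, q *byte; x, y uintptr } gives
// size = dataSize = typ.size = 32, typ.ptrdata = 16, ptrmask = 0b0011.
// Phase 1 (shift 0) builds hb = 0b0011 plus the scan bits, i.e. 0xd3,
// and jumps to Phase 3 with w = 4 > nw = 2, which masks hb down to
// 0x13. The final bitmap byte is therefore 0x13: pointer bits for
// words 0 and 1, scan bit on word 0, checkmark slot (word 1) clear,
// and word 2 dead, so the scalar tail is never scanned.
// (Illustrative walkthrough, not from the source.)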

var debugPtrmask struct {
	lock mutex
	data *byte
}

// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element that the GC program describes (a prefix of).
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*sys.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1) // repeat that element for count
		// This zero-pads the data remaining in the first element and then
		// repeats that first element to fill the array.
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / sys.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only scanned up to the ptrdata of the
		// last element, so that the memclr below clears the
		// rest of the bitmap and scanning can stop early in the
		// final element.
		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
	}
	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/sys.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint

// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward;
// if size == 2, dst is the 2-bit heap bitmap, and the scan bit is set
// for every word emitted.
// It returns the number of bits written to dst.
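// For example, the five-byte program {0x01, 0x01, 0x81, 0x03, 0x00}
// emits a single 1 bit and then repeats that bit three more times,
// producing the 4-bit pointer mask 0b1111 (four consecutive pointer
// words) before stopping. (Illustrative example; real programs are
// generated by the compiler and linker.)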
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
			}
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					p = trailer
					trailer = nil
					continue
				}
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the possibly
		// many repetitions instead of flushing to memory and reading
		// back.
		//
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		src := dst
		const maxBits = sys.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = subtract1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Those two may be different from each other by
			// 1-7 bits. We may have loaded too many; trim.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= sys.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = add1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n is too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = subtractb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = add1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = add1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = add1(dst)
			bits >>= 4
		}
	}
	return totalBits
}

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, &memstats.gc_sys)
	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, &memstats.gc_sys)
}

// dumpGCProg prints the GC program prog in a human-readable form
// for debugging.
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

// getgcmaskcb is a callback for gentraceback that finds the stack
// frame containing target.sp and records that frame in *target.
func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}

// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x interface{}) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / sys.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}
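
// For instance, on a 64-bit system reflect_gcbits applied to a value of
// type *struct{ x uintptr; p *byte } yields [0 1]: the first word of
// the struct is a scalar and the second is a pointer.
// (Illustrative example, not from the source.)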

// getgcmask returns the GC type info for the value ep, for testing.
// If ep points to the stack, only static live information is returned.
// The result is the bitmap entries (0 or 1), one entry per byte.
func getgcmask(ep interface{}) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.data) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		hbits := heapBitsForAddr(base)
		n := s.elemsize
		mask = make([]byte, n/sys.PtrSize)
		for i := uintptr(0); i < n; i += sys.PtrSize {
			if hbits.isPointer() {
				mask[i/sys.PtrSize] = 1
			}
			if i != 1*sys.PtrSize && !hbits.morePointers() {
				mask = mask[:i/sys.PtrSize]
				break
			}
			hbits = hbits.next()
		}
		return
	}

	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		_g_ := getg()
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			locals, _, _ := getStackMap(&frame, nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * sys.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
				mask[i/sys.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}