src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack.

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the
imminent stack overflow (morestack).
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
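
// A worked reading of the rounding above: on linux/amd64 _StackSystem is 0,
// so _FixedStack0 = 2048 is already a power of 2 and _FixedStack == 2048.
// On windows/amd64, _StackSystem = 512*8 = 4096, so _FixedStack0 = 6144;
// the bit-smearing sequence ORs each value with its shifted self until all
// low bits are set (6143 -> 8191), and adding 1 yields _FixedStack == 8192.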

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}
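
// On linux/amd64 and most Unix platforms _NumStackOrders is 4, so the pool
// maintains free lists for 2 KiB, 4 KiB, 8 KiB, and 16 KiB stacks
// (orders 0 through 3 by the formula above); larger stacks bypass the pool.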

//go:notinheap
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
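
// For example, stacklog2(1) == 0 and stacklog2(8) == 3; values that are not
// powers of 2 round down, so stacklog2(9) == 3 as well.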

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
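
// With the usual _StackCacheSize of 32 KiB, an order-0 span (2 KiB stacks
// on Unix systems) is carved into 16 free-list entries by the loop above;
// higher orders yield proportionally fewer, larger entries.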

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
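
// Refilling stops at half of _StackCacheSize, and stackcacherelease (below)
// drains back down to the same watermark, so stacks move between the per-P
// cache and the global pool in roughly 16 KiB batches rather than one at a
// time; that hysteresis is what prevents thrashing.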

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
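
// Tracing the two paths above on linux/amd64: stackalloc(4096) computes
// order 1 (4096 is one doubling above _FixedStack == 2048) and pops a 4 KiB
// stack from the per-P cache, while stackalloc(65536) fails the small-stack
// test (65536 >= _StackCacheSize) and takes the large path with npage == 8
// (assuming the usual 8 KiB runtime pages) and log2npage == 3.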

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap yet. Add it to the
			// large stack cache instead; freeStackSpans
			// will return it to the heap at the end of
			// the GC cycle.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi marks the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
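
// For instance, with old == [0xc000000000, 0xc000002000) and a new stack
// ending at 0xc000042000, delta == 0x40000; a stack word holding
// 0xc000001ff0 lies inside the old range and is rewritten to 0xc000041ff0,
// while values outside the old stack are left untouched.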

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
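
// For example, ptrbit(10) loads byte i/8 == 1 of bytedata and extracts bit
// i%8 == 2; a result of 1 means word 10 of the described region is a pointer.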

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)

	// If this region may overlap an active channel receive slot
	// (anything below adjinfo.sghi), use CAS so we don't clobber
	// a pointer being written concurrently by a channel operation.
	// See syncadjustsudogs.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't copy
		// its frame: it contains no pointers into the stack.
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			t := obj.typ
			gcdata := t.gcdata
			var s *mspan
			if t.kind&kindGCProg != 0 {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(t.ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}

	// Adjust defer argument blocks the same way we adjust active stack frames.
	// Note: this code is after the loop above, so that if a defer record is
	// stack allocated, we work on the copy in the new stack.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// occur because G's can't be preempted while they hold
			// hchan locks.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
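
// Because stacks grow downward, the live portion is the `used` bytes ending
// at old.hi, so the memmove above copies it to end at new.hi and every
// adjusted pointer simply moves by delta = new.hi - old.hi. For example,
// growing a 2 KiB stack to 4 KiB with 512 bytes in use copies only those
// 512 bytes (less any sudog-covered region already copied under channel
// locks by syncadjustsudogs).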

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
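
// round2(5) == 8 and round2(8) == 8. Callers such as malg round requested
// goroutine stack sizes up this way so that stackalloc's power-of-2
// requirement holds.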

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this g may be blocked for quite a while.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		for newsize-oldsize < max+_StackGuard {
			newsize *= 2
		}
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
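
// The doubling above gives multiplicative growth: a goroutine that
// eventually needs a 1 MiB stack goes through nine grow-and-copy cycles
// from the initial 2 KiB (2^11 -> 2^20), so the total copying work stays
// linear in the final stack size.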

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only when this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for why).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
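
// As a concrete reading of the used < avail/4 test: a goroutine holding a
// 64 KiB stack whose SP is 8 KiB deep counts roughly 8 KiB + _StackLimit
// of "used" stack, well under the 16 KiB threshold, so its stack is halved
// to 32 KiB at the next safe opportunity.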

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. We want the live pointer map at
		// the point of the call, not at the return address:
		// the return address may belong to the stack map of
		// the next instruction.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	p := funcdata(f, _FUNCDATA_StackObjects)
	if p != nil {
		n := *(*uintptr)(p)
		p = add(p, sys.PtrSize)
		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
		// Note: the noescape above is needed to keep
		// getStackMap from "leaking param content:
		// frame". That leak propagates up to getgcmask, then
		// GCMask, then verifyGCInfo, which converts the stack
		// gcinfo tests into heap gcinfo tests :(
	}

	return
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off int
	typ *_type
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}