src/runtime/proc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

5 package runtime
6
7 import (
8 "internal/cpu"
9 "runtime/internal/atomic"
10 "runtime/internal/sys"
11 "unsafe"
12 )
13
14 var buildVersion = sys.TheVersion
15
// set using cmd/go/internal/modload.ModInfoProg
17 var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.
//
// Worker thread parking/unparking: we need to balance between keeping enough
// running worker threads to utilize available hardware parallelism and parking
// excessive running worker threads to conserve CPU resources and power.
// Threads are unparked when a goroutine is readied if there is an idle P and
// no "spinning" worker threads. A spinning thread is one that is out of local
// work and did not find work in the global run queue or netpoller; it scans
// other P's run queues before parking. This spinning state compensates for the
// lack of a central list of runnable goroutines while keeping CPU burn bounded.
82 var (
83 m0 m
84 g0 g
85 mcache0 *mcache
86 raceprocctx0 uintptr
87 )
88
89
90 var runtime_inittask initTask
91
92
93 var main_inittask initTask
94
// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
99 var main_init_done chan bool
100
101
102 func main_main()
103
104
105 var mainStarted bool
106
107
108 var runtimeInitTime int64
109
110
111 var initSigmask sigset
112
// The main goroutine.
114 func main() {
115 g := getg()
116
117
118
119 g.m.g0.racectx = 0
120
121
122
123
124 if sys.PtrSize == 8 {
125 maxstacksize = 1000000000
126 } else {
127 maxstacksize = 250000000
128 }
129
130
131 mainStarted = true
132
133 if GOARCH != "wasm" {
134 systemstack(func() {
135 newm(sysmon, nil, -1)
136 })
137 }
138
// Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few
// do require certain calls to be made by the main thread.
// Those can arrange for main.main to run in the main thread
// by calling runtime.LockOSThread during initialization
// to preserve the lock.
145 lockOSThread()
146
147 if g.m != &m0 {
148 throw("runtime.main not on m0")
149 }
150
151 doInit(&runtime_inittask)
152 if nanotime() == 0 {
153 throw("nanotime returning zero")
154 }
155
156
157 needUnlock := true
158 defer func() {
159 if needUnlock {
160 unlockOSThread()
161 }
162 }()
163
164
165 runtimeInitTime = nanotime()
166
167 gcenable()
168
169 main_init_done = make(chan bool)
170 if iscgo {
171 if _cgo_thread_start == nil {
172 throw("_cgo_thread_start missing")
173 }
174 if GOOS != "windows" {
175 if _cgo_setenv == nil {
176 throw("_cgo_setenv missing")
177 }
178 if _cgo_unsetenv == nil {
179 throw("_cgo_unsetenv missing")
180 }
181 }
182 if _cgo_notify_runtime_init_done == nil {
183 throw("_cgo_notify_runtime_init_done missing")
184 }
185
186
187 startTemplateThread()
188 cgocall(_cgo_notify_runtime_init_done, nil)
189 }
190
191 doInit(&main_inittask)
192
193 close(main_init_done)
194
195 needUnlock = false
196 unlockOSThread()
197
198 if isarchive || islibrary {
199
200
201 return
202 }
203 fn := main_main
204 fn()
205 if raceenabled {
206 racefini()
207 }
208
209
210
211
212
213 if atomic.Load(&runningPanicDefers) != 0 {
214
215 for c := 0; c < 1000; c++ {
216 if atomic.Load(&runningPanicDefers) == 0 {
217 break
218 }
219 Gosched()
220 }
221 }
222 if atomic.Load(&panicking) != 0 {
223 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
224 }
225
226 exit(0)
227 for {
228 var x *int32
229 *x = 0
230 }
231 }
232
233
234
235 func os_beforeExit() {
236 if raceenabled {
237 racefini()
238 }
239 }
240
241
242 func init() {
243 go forcegchelper()
244 }
245
246 func forcegchelper() {
247 forcegc.g = getg()
248 lockInit(&forcegc.lock, lockRankForcegc)
249 for {
250 lock(&forcegc.lock)
251 if forcegc.idle != 0 {
252 throw("forcegc: phase error")
253 }
254 atomic.Store(&forcegc.idle, 1)
255 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
256
257 if debug.gctrace > 0 {
258 println("GC forced")
259 }
260
261 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
262 }
263 }
264
265
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
269 func Gosched() {
270 checkTimeouts()
271 mcall(gosched_m)
272 }
273
274
275
276
277 func goschedguarded() {
278 mcall(goschedguarded_m)
279 }
280
281
282
283
284
285
286
287
288
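// gopark puts the current goroutine into a waiting state with the given
// reason and calls unlockf(gp, lock) on the system stack. If unlockf
// returns false, the goroutine is resumed immediately; otherwise it stays
// parked until someone calls goready(gp). unlockf must not access this G's
// stack, since it may be moved between gopark and the call to unlockf.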
289 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
290 if reason != waitReasonSleep {
291 checkTimeouts()
292 }
293 mp := acquirem()
294 gp := mp.curg
295 status := readgstatus(gp)
296 if status != _Grunning && status != _Gscanrunning {
297 throw("gopark: bad g status")
298 }
299 mp.waitlock = lock
300 mp.waitunlockf = unlockf
301 gp.waitreason = reason
302 mp.waittraceev = traceEv
303 mp.waittraceskip = traceskip
304 releasem(mp)
305
306 mcall(park_m)
307 }
308
309
310
311 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
312 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
313 }
314
315 func goready(gp *g, traceskip int) {
316 systemstack(func() {
317 ready(gp, traceskip, true)
318 })
319 }
320
321
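// acquireSudog returns a sudog for use by the caller, taking it from the
// per-P cache (pp.sudogcache) and refilling that cache from the central
// list (sched.sudogcache) or from the heap when it is empty. releaseSudog
// returns it to the cache.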
322 func acquireSudog() *sudog {
323
324
325
326
327
328
329
330
331 mp := acquirem()
332 pp := mp.p.ptr()
333 if len(pp.sudogcache) == 0 {
334 lock(&sched.sudoglock)
335
336 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
337 s := sched.sudogcache
338 sched.sudogcache = s.next
339 s.next = nil
340 pp.sudogcache = append(pp.sudogcache, s)
341 }
342 unlock(&sched.sudoglock)
343
344 if len(pp.sudogcache) == 0 {
345 pp.sudogcache = append(pp.sudogcache, new(sudog))
346 }
347 }
348 n := len(pp.sudogcache)
349 s := pp.sudogcache[n-1]
350 pp.sudogcache[n-1] = nil
351 pp.sudogcache = pp.sudogcache[:n-1]
352 if s.elem != nil {
353 throw("acquireSudog: found s.elem != nil in cache")
354 }
355 releasem(mp)
356 return s
357 }
358
359
360 func releaseSudog(s *sudog) {
361 if s.elem != nil {
362 throw("runtime: sudog with non-nil elem")
363 }
364 if s.isSelect {
365 throw("runtime: sudog with non-false isSelect")
366 }
367 if s.next != nil {
368 throw("runtime: sudog with non-nil next")
369 }
370 if s.prev != nil {
371 throw("runtime: sudog with non-nil prev")
372 }
373 if s.waitlink != nil {
374 throw("runtime: sudog with non-nil waitlink")
375 }
376 if s.c != nil {
377 throw("runtime: sudog with non-nil c")
378 }
379 gp := getg()
380 if gp.param != nil {
381 throw("runtime: releaseSudog with non-nil gp.param")
382 }
383 mp := acquirem()
384 pp := mp.p.ptr()
385 if len(pp.sudogcache) == cap(pp.sudogcache) {
386
387 var first, last *sudog
388 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
389 n := len(pp.sudogcache)
390 p := pp.sudogcache[n-1]
391 pp.sudogcache[n-1] = nil
392 pp.sudogcache = pp.sudogcache[:n-1]
393 if first == nil {
394 first = p
395 } else {
396 last.next = p
397 }
398 last = p
399 }
400 lock(&sched.sudoglock)
401 last.next = sched.sudogcache
402 sched.sudogcache = first
403 unlock(&sched.sudoglock)
404 }
405 pp.sudogcache = append(pp.sudogcache, s)
406 releasem(mp)
407 }
408
409
410
411
412
413
414
415
416
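// funcPC returns the entry PC of the function f.
// It assumes that f is a func value; otherwise the behavior is undefined.
// CAREFUL: in programs with plugins, funcPC can return different values for
// the same function, because multiple copies of it exist in the address space.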
417 func funcPC(f interface{}) uintptr {
418 return *(*uintptr)(efaceOf(&f).data)
419 }
420
421
422 func badmcall(fn func(*g)) {
423 throw("runtime: mcall called on m->g0 stack")
424 }
425
426 func badmcall2(fn func(*g)) {
427 throw("runtime: mcall function returned")
428 }
429
430 func badreflectcall() {
431 panic(plainError("arg size to reflect.call more than 1GB"))
432 }
433
434 var badmorestackg0Msg = "fatal: morestack on g0\n"
435
436
437
438 func badmorestackg0() {
439 sp := stringStructOf(&badmorestackg0Msg)
440 write(2, sp.str, int32(sp.len))
441 }
442
443 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
444
445
446
447 func badmorestackgsignal() {
448 sp := stringStructOf(&badmorestackgsignalMsg)
449 write(2, sp.str, int32(sp.len))
450 }
451
452
453 func badctxt() {
454 throw("ctxt != 0")
455 }
456
457 func lockedOSThread() bool {
458 gp := getg()
459 return gp.lockedm != 0 && gp.m.lockedg != 0
460 }
461
462 var (
463 allgs []*g
464 allglock mutex
465 )
466
467 func allgadd(gp *g) {
468 if readgstatus(gp) == _Gidle {
469 throw("allgadd: bad status Gidle")
470 }
471
472 lock(&allglock)
473 allgs = append(allgs, gp)
474 allglen = uintptr(len(allgs))
475 unlock(&allglock)
476 }
477
478 const (
479
480
481 _GoidCacheBatch = 16
482 )
483
484
485
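// cpuinit extracts the environment variable GODEBUG from the environment
// on Unix-like operating systems and calls internal/cpu.Initialize.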
486 func cpuinit() {
487 const prefix = "GODEBUG="
488 var env string
489
490 switch GOOS {
491 case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
492 cpu.DebugOptions = true
493
494
495
496
497 n := int32(0)
498 for argv_index(argv, argc+1+n) != nil {
499 n++
500 }
501
502 for i := int32(0); i < n; i++ {
503 p := argv_index(argv, argc+1+i)
504 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
505
506 if hasPrefix(s, prefix) {
507 env = gostring(p)[len(prefix):]
508 break
509 }
510 }
511 }
512
513 cpu.Initialize(env)
514
515
516
517 x86HasPOPCNT = cpu.X86.HasPOPCNT
518 x86HasSSE41 = cpu.X86.HasSSE41
519 x86HasFMA = cpu.X86.HasFMA
520
521 armHasVFPv4 = cpu.ARM.HasVFPv4
522
523 arm64HasATOMICS = cpu.ARM64.HasATOMICS
524 }
525
526
527
528
529
530
531
532
533
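// schedinit performs runtime bootstrap initialization.
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.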
534 func schedinit() {
535 lockInit(&sched.lock, lockRankSched)
536 lockInit(&sched.sysmonlock, lockRankSysmon)
537 lockInit(&sched.deferlock, lockRankDefer)
538 lockInit(&sched.sudoglock, lockRankSudog)
539 lockInit(&deadlock, lockRankDeadlock)
540 lockInit(&paniclk, lockRankPanic)
541 lockInit(&allglock, lockRankAllg)
542 lockInit(&allpLock, lockRankAllp)
543 lockInit(&reflectOffs.lock, lockRankReflectOffs)
544 lockInit(&finlock, lockRankFin)
545 lockInit(&trace.bufLock, lockRankTraceBuf)
546 lockInit(&trace.stringsLock, lockRankTraceStrings)
547 lockInit(&trace.lock, lockRankTrace)
548 lockInit(&cpuprof.lock, lockRankCpuprof)
549 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
550
551
552
553 _g_ := getg()
554 if raceenabled {
555 _g_.racectx, raceprocctx0 = raceinit()
556 }
557
558 sched.maxmcount = 10000
559
560 tracebackinit()
561 moduledataverify()
562 stackinit()
563 mallocinit()
564 fastrandinit()
565 mcommoninit(_g_.m, -1)
566 cpuinit()
567 alginit()
568 modulesinit()
569 typelinksinit()
570 itabsinit()
571
572 sigsave(&_g_.m.sigmask)
573 initSigmask = _g_.m.sigmask
574
575 goargs()
576 goenvs()
577 parsedebugvars()
578 gcinit()
579
580 sched.lastpoll = uint64(nanotime())
581 procs := ncpu
582 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
583 procs = n
584 }
585 if procresize(procs) != nil {
586 throw("unknown runnable goroutine during bootstrap")
587 }
588
589
590
591
592 if debug.cgocheck > 1 {
593 writeBarrier.cgo = true
594 writeBarrier.enabled = true
595 for _, p := range allp {
596 p.wbBuf.reset()
597 }
598 }
599
600 if buildVersion == "" {
601
602
603 buildVersion = "unknown"
604 }
605 if len(modinfo) == 1 {
606
607
608 modinfo = ""
609 }
610 }
611
612 func dumpgstatus(gp *g) {
613 _g_ := getg()
614 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
615 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
616 }
617
618 func checkmcount() {
619
620 if mcount() > sched.maxmcount {
621 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
622 throw("thread exhaustion")
623 }
624 }
625
626
627
628
629
630 func mReserveID() int64 {
631 if sched.mnext+1 < sched.mnext {
632 throw("runtime: thread ID overflow")
633 }
634 id := sched.mnext
635 sched.mnext++
636 checkmcount()
637 return id
638 }
639
640
641 func mcommoninit(mp *m, id int64) {
642 _g_ := getg()
643
644
645 if _g_ != _g_.m.g0 {
646 callers(1, mp.createstack[:])
647 }
648
649 lock(&sched.lock)
650
651 if id >= 0 {
652 mp.id = id
653 } else {
654 mp.id = mReserveID()
655 }
656
657 mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
658 mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
659 if mp.fastrand[0]|mp.fastrand[1] == 0 {
660 mp.fastrand[1] = 1
661 }
662
663 mpreinit(mp)
664 if mp.gsignal != nil {
665 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
666 }
667
668
669
670 mp.alllink = allm
671
672
673
674 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
675 unlock(&sched.lock)
676
677
678 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
679 mp.cgoCallers = new(cgoCallers)
680 }
681 }
682
683 var fastrandseed uintptr
684
685 func fastrandinit() {
686 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
687 getRandomData(s)
688 }
689
690
691 func ready(gp *g, traceskip int, next bool) {
692 if trace.enabled {
693 traceGoUnpark(gp, traceskip)
694 }
695
696 status := readgstatus(gp)
697
698
699 _g_ := getg()
700 mp := acquirem()
701 if status&^_Gscan != _Gwaiting {
702 dumpgstatus(gp)
703 throw("bad g->status in ready")
704 }
705
706
707 casgstatus(gp, _Gwaiting, _Grunnable)
708 runqput(_g_.m.p.ptr(), gp, next)
709 wakep()
710 releasem(mp)
711 }
712
713
714
715 const freezeStopWait = 0x7fffffff
716
717
718
719 var freezing uint32
720
721
722
723
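// freezetheworld is similar to stopTheWorld but is best-effort and may be
// called several times. There is no reverse operation; it is used only while
// crashing. This function must not lock any mutexes.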
724 func freezetheworld() {
725 atomic.Store(&freezing, 1)
726
727
728
729 for i := 0; i < 5; i++ {
730
731 sched.stopwait = freezeStopWait
732 atomic.Store(&sched.gcwaiting, 1)
733
734 if !preemptall() {
735 break
736 }
737 usleep(1000)
738 }
739
740 usleep(1000)
741 preemptall()
742 usleep(1000)
743 }
744
745
746
747
748 func readgstatus(gp *g) uint32 {
749 return atomic.Load(&gp.atomicstatus)
750 }
751
752
753
754
755
756 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
757 success := false
758
759
760 switch oldval {
761 default:
762 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
763 dumpgstatus(gp)
764 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
765 case _Gscanrunnable,
766 _Gscanwaiting,
767 _Gscanrunning,
768 _Gscansyscall,
769 _Gscanpreempted:
770 if newval == oldval&^_Gscan {
771 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
772 }
773 }
774 if !success {
775 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
776 dumpgstatus(gp)
777 throw("casfrom_Gscanstatus: gp->status is not in scan state")
778 }
779 releaseLockRank(lockRankGscan)
780 }
781
782
783
784 func castogscanstatus(gp *g, oldval, newval uint32) bool {
785 switch oldval {
786 case _Grunnable,
787 _Grunning,
788 _Gwaiting,
789 _Gsyscall:
790 if newval == oldval|_Gscan {
791 r := atomic.Cas(&gp.atomicstatus, oldval, newval)
792 if r {
793 acquireLockRank(lockRankGscan)
794 }
795 return r
796
797 }
798 }
799 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
800 throw("castogscanstatus")
801 panic("not reached")
802 }
803
804
805
806
807
808
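// casgstatus atomically transitions gp from oldval to newval.
// If asked to move to or from a Gscan status it throws; use castogscanstatus
// and casfrom_Gscanstatus for those. casgstatus loops while gp->atomicstatus
// is in a Gscan state, until the routine that put it there is finished.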
809 func casgstatus(gp *g, oldval, newval uint32) {
810 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
811 systemstack(func() {
812 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
813 throw("casgstatus: bad incoming values")
814 })
815 }
816
817 acquireLockRank(lockRankGscan)
818 releaseLockRank(lockRankGscan)
819
820
821 const yieldDelay = 5 * 1000
822 var nextYield int64
823
824
825
826 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
827 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
828 throw("casgstatus: waiting for Gwaiting but is Grunnable")
829 }
830 if i == 0 {
831 nextYield = nanotime() + yieldDelay
832 }
833 if nanotime() < nextYield {
834 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
835 procyield(1)
836 }
837 } else {
838 osyield()
839 nextYield = nanotime() + yieldDelay/2
840 }
841 }
842 }
843
844
845
846
847
848
849
850 func casgcopystack(gp *g) uint32 {
851 for {
852 oldstatus := readgstatus(gp) &^ _Gscan
853 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
854 throw("copystack: bad status, not Gwaiting or Grunnable")
855 }
856 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
857 return oldstatus
858 }
859 }
860 }
861
862
863
864
865
866 func casGToPreemptScan(gp *g, old, new uint32) {
867 if old != _Grunning || new != _Gscan|_Gpreempted {
868 throw("bad g transition")
869 }
870 acquireLockRank(lockRankGscan)
871 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
872 }
873 }
874
875
876
877
878 func casGFromPreempted(gp *g, old, new uint32) bool {
879 if old != _Gpreempted || new != _Gwaiting {
880 throw("bad g transition")
881 }
882 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
883 }
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
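// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points, and records reason as the reason for the stop.
// On return, only the current goroutine's P is running. stopTheWorld must not
// be called from a system stack, and the caller must not hold worldsema on entry.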
899 func stopTheWorld(reason string) {
900 semacquire(&worldsema)
901 gp := getg()
902 gp.m.preemptoff = reason
903 systemstack(func() {
904
905
906
907
908
909
910
911
912
913
914 casgstatus(gp, _Grunning, _Gwaiting)
915 stopTheWorldWithSema()
916 casgstatus(gp, _Gwaiting, _Grunning)
917 })
918 }
919
920
921 func startTheWorld() {
922 systemstack(func() { startTheWorldWithSema(false) })
923
924
925 semrelease(&worldsema)
926 getg().m.preemptoff = ""
927 }
928
929
930
931
932 func stopTheWorldGC(reason string) {
933 semacquire(&gcsema)
934 stopTheWorld(reason)
935 }
936
937
938 func startTheWorldGC() {
939 startTheWorld()
940 semrelease(&gcsema)
941 }
942
// Holding worldsema grants an M the right to try to stop the world.
944 var worldsema uint32 = 1
945
// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
952 var gcsema uint32 = 1
953
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first, and should then call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason for preemption"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs; other P's are able
// to execute between successive calls. Holding worldsema causes any
// other goroutines invoking stopTheWorld to block.
976 func stopTheWorldWithSema() {
977 _g_ := getg()
978
979
980
981 if _g_.m.locks > 0 {
982 throw("stopTheWorld: holding locks")
983 }
984
985 lock(&sched.lock)
986 sched.stopwait = gomaxprocs
987 atomic.Store(&sched.gcwaiting, 1)
988 preemptall()
989
990 _g_.m.p.ptr().status = _Pgcstop
991 sched.stopwait--
992
993 for _, p := range allp {
994 s := p.status
995 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
996 if trace.enabled {
997 traceGoSysBlock(p)
998 traceProcStop(p)
999 }
1000 p.syscalltick++
1001 sched.stopwait--
1002 }
1003 }
1004
1005 for {
1006 p := pidleget()
1007 if p == nil {
1008 break
1009 }
1010 p.status = _Pgcstop
1011 sched.stopwait--
1012 }
1013 wait := sched.stopwait > 0
1014 unlock(&sched.lock)
1015
1016
1017 if wait {
1018 for {
1019
1020 if notetsleep(&sched.stopnote, 100*1000) {
1021 noteclear(&sched.stopnote)
1022 break
1023 }
1024 preemptall()
1025 }
1026 }
1027
1028
1029 bad := ""
1030 if sched.stopwait != 0 {
1031 bad = "stopTheWorld: not stopped (stopwait != 0)"
1032 } else {
1033 for _, p := range allp {
1034 if p.status != _Pgcstop {
1035 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1036 }
1037 }
1038 }
1039 if atomic.Load(&freezing) != 0 {
1040
1041
1042
1043
1044 lock(&deadlock)
1045 lock(&deadlock)
1046 }
1047 if bad != "" {
1048 throw(bad)
1049 }
1050 }
1051
1052 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1053 mp := acquirem()
1054 if netpollinited() {
1055 list := netpoll(0)
1056 injectglist(&list)
1057 }
1058 lock(&sched.lock)
1059
1060 procs := gomaxprocs
1061 if newprocs != 0 {
1062 procs = newprocs
1063 newprocs = 0
1064 }
1065 p1 := procresize(procs)
1066 sched.gcwaiting = 0
1067 if sched.sysmonwait != 0 {
1068 sched.sysmonwait = 0
1069 notewakeup(&sched.sysmonnote)
1070 }
1071 unlock(&sched.lock)
1072
1073 for p1 != nil {
1074 p := p1
1075 p1 = p1.link.ptr()
1076 if p.m != 0 {
1077 mp := p.m.ptr()
1078 p.m = 0
1079 if mp.nextp != 0 {
1080 throw("startTheWorld: inconsistent mp->nextp")
1081 }
1082 mp.nextp.set(p)
1083 notewakeup(&mp.park)
1084 } else {
1085
1086 newm(nil, p, -1)
1087 }
1088 }
1089
1090
1091 startTime := nanotime()
1092 if emitTraceEvent {
1093 traceGCSTWDone()
1094 }
1095
1096
1097
1098
1099 wakep()
1100
1101 releasem(mp)
1102
1103 return startTime
1104 }
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
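// mstart is the entry-point for new Ms. It sets up stack bounds and guards
// for the g0 and then calls mstart1. It may run during STW (because it does
// not have a P yet), so write barriers are not allowed.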
1116 func mstart() {
1117 _g_ := getg()
1118
1119 osStack := _g_.stack.lo == 0
1120 if osStack {
1121
1122
1123
1124 size := _g_.stack.hi
1125 if size == 0 {
1126 size = 8192 * sys.StackGuardMultiplier
1127 }
1128 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1129 _g_.stack.lo = _g_.stack.hi - size + 1024
1130 }
1131
1132
1133 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1134
1135
1136 _g_.stackguard1 = _g_.stackguard0
1137 mstart1()
1138
1139
1140 switch GOOS {
1141 case "windows", "solaris", "illumos", "plan9", "darwin", "aix":
1142
1143
1144
1145 osStack = true
1146 }
1147 mexit(osStack)
1148 }
1149
1150 func mstart1() {
1151 _g_ := getg()
1152
1153 if _g_ != _g_.m.g0 {
1154 throw("bad runtime·mstart")
1155 }
1156
1157
1158
1159
1160
1161 save(getcallerpc(), getcallersp())
1162 asminit()
1163 minit()
1164
1165
1166
1167 if _g_.m == &m0 {
1168 mstartm0()
1169 }
1170
1171 if fn := _g_.m.mstartfn; fn != nil {
1172 fn()
1173 }
1174
1175 if _g_.m != &m0 {
1176 acquirep(_g_.m.nextp.ptr())
1177 _g_.m.nextp = 0
1178 }
1179 schedule()
1180 }
1181
1182
1183
1184
1185
1186
1187
1188 func mstartm0() {
1189
1190
1191
1192 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1193 cgoHasExtraM = true
1194 newextram()
1195 }
1196 initsig(false)
1197 }
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
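// mexit tears down and exits the current thread. Don't call this directly;
// it must run at the top of the thread stack (use gogo(&_g_.m.g0.sched) to
// unwind to that point). If osStack is true, the m's g0 stack is owned by the
// OS and is not freed here.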
1209 func mexit(osStack bool) {
1210 g := getg()
1211 m := g.m
1212
1213 if m == &m0 {
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225 handoffp(releasep())
1226 lock(&sched.lock)
1227 sched.nmfreed++
1228 checkdead()
1229 unlock(&sched.lock)
1230 notesleep(&m.park)
1231 throw("locked m0 woke up")
1232 }
1233
1234 sigblock()
1235 unminit()
1236
1237
1238 if m.gsignal != nil {
1239 stackfree(m.gsignal.stack)
1240
1241
1242
1243
1244 m.gsignal = nil
1245 }
1246
1247
1248 lock(&sched.lock)
1249 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1250 if *pprev == m {
1251 *pprev = m.alllink
1252 goto found
1253 }
1254 }
1255 throw("m not found in allm")
1256 found:
1257 if !osStack {
1258
1259
1260
1261
1262 atomic.Store(&m.freeWait, 1)
1263
1264
1265
1266
1267 m.freelink = sched.freem
1268 sched.freem = m
1269 }
1270 unlock(&sched.lock)
1271
1272
1273 handoffp(releasep())
1274
1275
1276
1277
1278
1279 lock(&sched.lock)
1280 sched.nmfreed++
1281 checkdead()
1282 unlock(&sched.lock)
1283
1284 if GOOS == "darwin" {
1285
1286
1287 if atomic.Load(&m.signalPending) != 0 {
1288 atomic.Xadd(&pendingPreemptSignals, -1)
1289 }
1290 }
1291
1292 if osStack {
1293
1294
1295 return
1296 }
1297
1298
1299
1300
1301
1302 exitThread(&m.freeWait)
1303 }
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
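// forEachP calls fn(p) for every P when that P reaches a GC safe point.
// If a P is currently executing code, this brings it to a safe point and runs
// fn on that P; if a P is idle or in a syscall, fn is called directly while
// preventing the P from exiting that state. The caller must hold worldsema.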
1316 func forEachP(fn func(*p)) {
1317 mp := acquirem()
1318 _p_ := getg().m.p.ptr()
1319
1320 lock(&sched.lock)
1321 if sched.safePointWait != 0 {
1322 throw("forEachP: sched.safePointWait != 0")
1323 }
1324 sched.safePointWait = gomaxprocs - 1
1325 sched.safePointFn = fn
1326
1327
1328 for _, p := range allp {
1329 if p != _p_ {
1330 atomic.Store(&p.runSafePointFn, 1)
1331 }
1332 }
1333 preemptall()
1334
1335
1336
1337
1338
1339
1340
1341 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1342 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1343 fn(p)
1344 sched.safePointWait--
1345 }
1346 }
1347
1348 wait := sched.safePointWait > 0
1349 unlock(&sched.lock)
1350
1351
1352 fn(_p_)
1353
1354
1355
1356 for _, p := range allp {
1357 s := p.status
1358 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1359 if trace.enabled {
1360 traceGoSysBlock(p)
1361 traceProcStop(p)
1362 }
1363 p.syscalltick++
1364 handoffp(p)
1365 }
1366 }
1367
1368
1369 if wait {
1370 for {
1371
1372
1373
1374
1375 if notetsleep(&sched.safePointNote, 100*1000) {
1376 noteclear(&sched.safePointNote)
1377 break
1378 }
1379 preemptall()
1380 }
1381 }
1382 if sched.safePointWait != 0 {
1383 throw("forEachP: not done")
1384 }
1385 for _, p := range allp {
1386 if p.runSafePointFn != 0 {
1387 throw("forEachP: P did not run fn")
1388 }
1389 }
1390
1391 lock(&sched.lock)
1392 sched.safePointFn = nil
1393 unlock(&sched.lock)
1394 releasem(mp)
1395 }
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408 func runSafePointFn() {
1409 p := getg().m.p.ptr()
1410
1411
1412
1413 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1414 return
1415 }
1416 sched.safePointFn(p)
1417 lock(&sched.lock)
1418 sched.safePointWait--
1419 if sched.safePointWait == 0 {
1420 notewakeup(&sched.safePointNote)
1421 }
1422 unlock(&sched.lock)
1423 }
1424
1425
1426
1427
1428 var cgoThreadStart unsafe.Pointer
1429
1430 type cgothreadstart struct {
1431 g guintptr
1432 tls *uint64
1433 fn unsafe.Pointer
1434 }
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
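// allocm allocates a new m unassociated with any thread, using _p_ for the
// allocation context if needed. fn is recorded as the new m's m.mstartfn, and
// id is an optional pre-allocated m ID (pass -1 to have one reserved).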
1445 func allocm(_p_ *p, fn func(), id int64) *m {
1446 _g_ := getg()
1447 acquirem()
1448 if _g_.m.p == 0 {
1449 acquirep(_p_)
1450 }
1451
1452
1453
1454 if sched.freem != nil {
1455 lock(&sched.lock)
1456 var newList *m
1457 for freem := sched.freem; freem != nil; {
1458 if freem.freeWait != 0 {
1459 next := freem.freelink
1460 freem.freelink = newList
1461 newList = freem
1462 freem = next
1463 continue
1464 }
1465 stackfree(freem.g0.stack)
1466 freem = freem.freelink
1467 }
1468 sched.freem = newList
1469 unlock(&sched.lock)
1470 }
1471
1472 mp := new(m)
1473 mp.mstartfn = fn
1474 mcommoninit(mp, id)
1475
1476
1477
1478 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" {
1479 mp.g0 = malg(-1)
1480 } else {
1481 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1482 }
1483 mp.g0.m = mp
1484
1485 if _p_ == _g_.m.p.ptr() {
1486 releasep()
1487 }
1488 releasem(_g_.m)
1489
1490 return mp
1491 }
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527 func needm(x byte) {
1528 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1529
1530
1531
1532
1533
1534
1535 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1536 exit(1)
1537 }
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547 var sigmask sigset
1548 sigsave(&sigmask)
1549 sigblock()
1550
1551
1552
1553
1554
1555 mp := lockextra(false)
1556
1557
1558
1559
1560
1561
1562
1563
1564 mp.needextram = mp.schedlink == 0
1565 extraMCount--
1566 unlockextra(mp.schedlink.ptr())
1567
1568
1569 mp.sigmask = sigmask
1570
1571
1572
1573
1574
1575
1576 setg(mp.g0)
1577 _g_ := getg()
1578 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
1579 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
1580 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1581
1582
1583 asminit()
1584 minit()
1585
1586
1587 casgstatus(mp.curg, _Gdead, _Gsyscall)
1588 atomic.Xadd(&sched.ngsys, -1)
1589 }
1590
1591 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1592
1593
1594
1595
1596 func newextram() {
1597 c := atomic.Xchg(&extraMWaiters, 0)
1598 if c > 0 {
1599 for i := uint32(0); i < c; i++ {
1600 oneNewExtraM()
1601 }
1602 } else {
1603
1604 mp := lockextra(true)
1605 unlockextra(mp)
1606 if mp == nil {
1607 oneNewExtraM()
1608 }
1609 }
1610 }
1611
1612
1613 func oneNewExtraM() {
1614
1615
1616
1617
1618
1619 mp := allocm(nil, nil, -1)
1620 gp := malg(4096)
1621 gp.sched.pc = funcPC(goexit) + sys.PCQuantum
1622 gp.sched.sp = gp.stack.hi
1623 gp.sched.sp -= 4 * sys.RegSize
1624 gp.sched.lr = 0
1625 gp.sched.g = guintptr(unsafe.Pointer(gp))
1626 gp.syscallpc = gp.sched.pc
1627 gp.syscallsp = gp.sched.sp
1628 gp.stktopsp = gp.sched.sp
1629
1630
1631
1632
1633 casgstatus(gp, _Gidle, _Gdead)
1634 gp.m = mp
1635 mp.curg = gp
1636 mp.lockedInt++
1637 mp.lockedg.set(gp)
1638 gp.lockedm.set(mp)
1639 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1640 if raceenabled {
1641 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
1642 }
1643
1644 allgadd(gp)
1645
1646
1647
1648
1649
1650 atomic.Xadd(&sched.ngsys, +1)
1651
1652
1653 mnext := lockextra(true)
1654 mp.schedlink.set(mnext)
1655 extraMCount++
1656 unlockextra(mp)
1657 }
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682 func dropm() {
1683
1684
1685
1686 mp := getg().m
1687
1688
1689 casgstatus(mp.curg, _Gsyscall, _Gdead)
1690 mp.curg.preemptStop = false
1691 atomic.Xadd(&sched.ngsys, +1)
1692
1693
1694
1695
1696
1697 sigmask := mp.sigmask
1698 sigblock()
1699 unminit()
1700
1701 mnext := lockextra(true)
1702 extraMCount++
1703 mp.schedlink.set(mnext)
1704
1705 setg(nil)
1706
1707
1708 unlockextra(mp)
1709
1710 msigrestore(sigmask)
1711 }
1712
1713
1714 func getm() uintptr {
1715 return uintptr(unsafe.Pointer(getg().m))
1716 }
1717
1718 var extram uintptr
1719 var extraMCount uint32
1720 var extraMWaiters uint32
1721
1722
1723
1724
1725
1726
1727
1728 func lockextra(nilokay bool) *m {
1729 const locked = 1
1730
1731 incr := false
1732 for {
1733 old := atomic.Loaduintptr(&extram)
1734 if old == locked {
1735 osyield()
1736 continue
1737 }
1738 if old == 0 && !nilokay {
1739 if !incr {
1740
1741
1742
1743 atomic.Xadd(&extraMWaiters, 1)
1744 incr = true
1745 }
1746 usleep(1)
1747 continue
1748 }
1749 if atomic.Casuintptr(&extram, old, locked) {
1750 return (*m)(unsafe.Pointer(old))
1751 }
1752 osyield()
1753 continue
1754 }
1755 }
1756
1757
1758 func unlockextra(mp *m) {
1759 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1760 }
1761
1762
1763
1764 var execLock rwmutex
1765
1766
1767
1768
1769 var newmHandoff struct {
1770 lock mutex
1771
1772
1773
1774 newm muintptr
1775
1776
1777
1778 waiting bool
1779 wake note
1780
1781
1782
1783
1784 haveTemplateThread uint32
1785 }
1786
1787
1788
1789
1790
1791
1792
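// newm creates a new m that will start off by running fn, or the scheduler if
// fn is nil. id is an optional pre-allocated m ID; omit it by passing -1.
// If the calling thread is locked to an OS thread or was started by C code,
// creation is handed off to the template thread so the new thread starts in a
// known-good state.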
1793 func newm(fn func(), _p_ *p, id int64) {
1794 mp := allocm(_p_, fn, id)
1795 mp.nextp.set(_p_)
1796 mp.sigmask = initSigmask
1797 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809 lock(&newmHandoff.lock)
1810 if newmHandoff.haveTemplateThread == 0 {
1811 throw("on a locked thread with no template thread")
1812 }
1813 mp.schedlink = newmHandoff.newm
1814 newmHandoff.newm.set(mp)
1815 if newmHandoff.waiting {
1816 newmHandoff.waiting = false
1817 notewakeup(&newmHandoff.wake)
1818 }
1819 unlock(&newmHandoff.lock)
1820 return
1821 }
1822 newm1(mp)
1823 }
1824
1825 func newm1(mp *m) {
1826 if iscgo {
1827 var ts cgothreadstart
1828 if _cgo_thread_start == nil {
1829 throw("_cgo_thread_start missing")
1830 }
1831 ts.g.set(mp.g0)
1832 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
1833 ts.fn = unsafe.Pointer(funcPC(mstart))
1834 if msanenabled {
1835 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
1836 }
1837 execLock.rlock()
1838 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
1839 execLock.runlock()
1840 return
1841 }
1842 execLock.rlock()
1843 newosproc(mp)
1844 execLock.runlock()
1845 }
1846
1847
1848
1849
1850
1851 func startTemplateThread() {
1852 if GOARCH == "wasm" {
1853 return
1854 }
1855
1856
1857
1858 mp := acquirem()
1859 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1860 releasem(mp)
1861 return
1862 }
1863 newm(templateThread, nil, -1)
1864 releasem(mp)
1865 }
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879 func templateThread() {
1880 lock(&sched.lock)
1881 sched.nmsys++
1882 checkdead()
1883 unlock(&sched.lock)
1884
1885 for {
1886 lock(&newmHandoff.lock)
1887 for newmHandoff.newm != 0 {
1888 newm := newmHandoff.newm.ptr()
1889 newmHandoff.newm = 0
1890 unlock(&newmHandoff.lock)
1891 for newm != nil {
1892 next := newm.schedlink.ptr()
1893 newm.schedlink = 0
1894 newm1(newm)
1895 newm = next
1896 }
1897 lock(&newmHandoff.lock)
1898 }
1899 newmHandoff.waiting = true
1900 noteclear(&newmHandoff.wake)
1901 unlock(&newmHandoff.lock)
1902 notesleep(&newmHandoff.wake)
1903 }
1904 }
1905
1906
1907
1908 func stopm() {
1909 _g_ := getg()
1910
1911 if _g_.m.locks != 0 {
1912 throw("stopm holding locks")
1913 }
1914 if _g_.m.p != 0 {
1915 throw("stopm holding p")
1916 }
1917 if _g_.m.spinning {
1918 throw("stopm spinning")
1919 }
1920
1921 lock(&sched.lock)
1922 mput(_g_.m)
1923 unlock(&sched.lock)
1924 notesleep(&_g_.m.park)
1925 noteclear(&_g_.m.park)
1926 acquirep(_g_.m.nextp.ptr())
1927 _g_.m.nextp = 0
1928 }
1929
1930 func mspinning() {
1931
1932 getg().m.spinning = true
1933 }
1934
1935
1936
1937
1938
1939
1940
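// startm schedules some M to run the P (creating an M if necessary).
// If _p_ is nil, it tries to get an idle P; if there is none, it does nothing.
// If spinning is set, the caller has incremented nmspinning and startm will
// either decrement nmspinning or set m.spinning in the newly started M.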
1941 func startm(_p_ *p, spinning bool) {
1942 lock(&sched.lock)
1943 if _p_ == nil {
1944 _p_ = pidleget()
1945 if _p_ == nil {
1946 unlock(&sched.lock)
1947 if spinning {
1948
1949
1950 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1951 throw("startm: negative nmspinning")
1952 }
1953 }
1954 return
1955 }
1956 }
1957 mp := mget()
1958 if mp == nil {
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971 id := mReserveID()
1972 unlock(&sched.lock)
1973
1974 var fn func()
1975 if spinning {
1976
1977 fn = mspinning
1978 }
1979 newm(fn, _p_, id)
1980 return
1981 }
1982 unlock(&sched.lock)
1983 if mp.spinning {
1984 throw("startm: m is spinning")
1985 }
1986 if mp.nextp != 0 {
1987 throw("startm: m has p")
1988 }
1989 if spinning && !runqempty(_p_) {
1990 throw("startm: p has runnable gs")
1991 }
1992
1993 mp.spinning = spinning
1994 mp.nextp.set(_p_)
1995 notewakeup(&mp.park)
1996 }
1997
1998
1999
2000
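// handoffp hands off P from a syscall or from a locked M. It decides whether
// the P has work that justifies starting an M for it; otherwise the P is put
// on the idle list. Always runs without a P, so write barriers are not allowed.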
2001 func handoffp(_p_ *p) {
2002
2003
2004
2005
2006 if !runqempty(_p_) || sched.runqsize != 0 {
2007 startm(_p_, false)
2008 return
2009 }
2010
2011 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2012 startm(_p_, false)
2013 return
2014 }
2015
2016
2017 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2018 startm(_p_, true)
2019 return
2020 }
2021 lock(&sched.lock)
2022 if sched.gcwaiting != 0 {
2023 _p_.status = _Pgcstop
2024 sched.stopwait--
2025 if sched.stopwait == 0 {
2026 notewakeup(&sched.stopnote)
2027 }
2028 unlock(&sched.lock)
2029 return
2030 }
2031 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2032 sched.safePointFn(_p_)
2033 sched.safePointWait--
2034 if sched.safePointWait == 0 {
2035 notewakeup(&sched.safePointNote)
2036 }
2037 }
2038 if sched.runqsize != 0 {
2039 unlock(&sched.lock)
2040 startm(_p_, false)
2041 return
2042 }
2043
2044
2045 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2046 unlock(&sched.lock)
2047 startm(_p_, false)
2048 return
2049 }
2050 if when := nobarrierWakeTime(_p_); when != 0 {
2051 wakeNetPoller(when)
2052 }
2053 pidleput(_p_)
2054 unlock(&sched.lock)
2055 }
2056
2057
2058
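// wakep tries to add one more P to execute G's.
// It is called when a G is made runnable (newproc, ready).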
2059 func wakep() {
2060 if atomic.Load(&sched.npidle) == 0 {
2061 return
2062 }
2063
2064 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
2065 return
2066 }
2067 startm(nil, true)
2068 }
2069
2070
2071
2072 func stoplockedm() {
2073 _g_ := getg()
2074
2075 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2076 throw("stoplockedm: inconsistent locking")
2077 }
2078 if _g_.m.p != 0 {
2079
2080 _p_ := releasep()
2081 handoffp(_p_)
2082 }
2083 incidlelocked(1)
2084
2085 notesleep(&_g_.m.park)
2086 noteclear(&_g_.m.park)
2087 status := readgstatus(_g_.m.lockedg.ptr())
2088 if status&^_Gscan != _Grunnable {
2089 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2090 dumpgstatus(_g_)
2091 throw("stoplockedm: not runnable")
2092 }
2093 acquirep(_g_.m.nextp.ptr())
2094 _g_.m.nextp = 0
2095 }
2096
2097
2098
2099
2100 func startlockedm(gp *g) {
2101 _g_ := getg()
2102
2103 mp := gp.lockedm.ptr()
2104 if mp == _g_.m {
2105 throw("startlockedm: locked to me")
2106 }
2107 if mp.nextp != 0 {
2108 throw("startlockedm: m has p")
2109 }
2110
2111 incidlelocked(-1)
2112 _p_ := releasep()
2113 mp.nextp.set(_p_)
2114 notewakeup(&mp.park)
2115 stopm()
2116 }
2117
2118
2119
2120 func gcstopm() {
2121 _g_ := getg()
2122
2123 if sched.gcwaiting == 0 {
2124 throw("gcstopm: not waiting for gc")
2125 }
2126 if _g_.m.spinning {
2127 _g_.m.spinning = false
2128
2129
2130 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2131 throw("gcstopm: negative nmspinning")
2132 }
2133 }
2134 _p_ := releasep()
2135 lock(&sched.lock)
2136 _p_.status = _Pgcstop
2137 sched.stopwait--
2138 if sched.stopwait == 0 {
2139 notewakeup(&sched.stopnote)
2140 }
2141 unlock(&sched.lock)
2142 stopm()
2143 }
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
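// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts
// a new time slice. Never returns.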
2154 func execute(gp *g, inheritTime bool) {
2155 _g_ := getg()
2156
2157
2158
2159 _g_.m.curg = gp
2160 gp.m = _g_.m
2161 casgstatus(gp, _Grunnable, _Grunning)
2162 gp.waitsince = 0
2163 gp.preempt = false
2164 gp.stackguard0 = gp.stack.lo + _StackGuard
2165 if !inheritTime {
2166 _g_.m.p.ptr().schedtick++
2167 }
2168
2169
2170 hz := sched.profilehz
2171 if _g_.m.profilehz != hz {
2172 setThreadCPUProfiler(hz)
2173 }
2174
2175 if trace.enabled {
2176
2177
2178 if gp.syscallsp != 0 && gp.sysblocktraced {
2179 traceGoSysExit(gp.sysexitticks)
2180 }
2181 traceGoStart()
2182 }
2183
2184 gogo(&gp.sched)
2185 }
2186
2187
2188
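// findrunnable finds a runnable goroutine to execute. It tries the local run
// queue, the global run queue, the netpoller, and stealing from other P's
// (including their timers), and blocks in stopm if nothing is found.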
2189 func findrunnable() (gp *g, inheritTime bool) {
2190 _g_ := getg()
2191
2192
2193
2194
2195
2196 top:
2197 _p_ := _g_.m.p.ptr()
2198 if sched.gcwaiting != 0 {
2199 gcstopm()
2200 goto top
2201 }
2202 if _p_.runSafePointFn != 0 {
2203 runSafePointFn()
2204 }
2205
2206 now, pollUntil, _ := checkTimers(_p_, 0)
2207
2208 if fingwait && fingwake {
2209 if gp := wakefing(); gp != nil {
2210 ready(gp, 0, true)
2211 }
2212 }
2213 if *cgo_yield != nil {
2214 asmcgocall(*cgo_yield, nil)
2215 }
2216
2217
2218 if gp, inheritTime := runqget(_p_); gp != nil {
2219 return gp, inheritTime
2220 }
2221
2222
2223 if sched.runqsize != 0 {
2224 lock(&sched.lock)
2225 gp := globrunqget(_p_, 0)
2226 unlock(&sched.lock)
2227 if gp != nil {
2228 return gp, false
2229 }
2230 }
2231
2232
2233
2234
2235
2236
2237
2238
2239 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2240 if list := netpoll(0); !list.empty() {
2241 gp := list.pop()
2242 injectglist(&list)
2243 casgstatus(gp, _Gwaiting, _Grunnable)
2244 if trace.enabled {
2245 traceGoUnpark(gp, 0)
2246 }
2247 return gp, false
2248 }
2249 }
2250
2251
2252 procs := uint32(gomaxprocs)
2253 ranTimer := false
2254
2255
2256
2257 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2258 goto stop
2259 }
2260 if !_g_.m.spinning {
2261 _g_.m.spinning = true
2262 atomic.Xadd(&sched.nmspinning, 1)
2263 }
2264 for i := 0; i < 4; i++ {
2265 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2266 if sched.gcwaiting != 0 {
2267 goto top
2268 }
2269 stealRunNextG := i > 2
2270 p2 := allp[enum.position()]
2271 if _p_ == p2 {
2272 continue
2273 }
2274 if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil {
2275 return gp, false
2276 }
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288 if i > 2 || (i > 1 && shouldStealTimers(p2)) {
2289 tnow, w, ran := checkTimers(p2, now)
2290 now = tnow
2291 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2292 pollUntil = w
2293 }
2294 if ran {
2295
2296
2297
2298
2299
2300
2301
2302
2303 if gp, inheritTime := runqget(_p_); gp != nil {
2304 return gp, inheritTime
2305 }
2306 ranTimer = true
2307 }
2308 }
2309 }
2310 }
2311 if ranTimer {
2312
2313 goto top
2314 }
2315
2316 stop:
2317
2318
2319
2320
2321 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2322 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2323 gp := _p_.gcBgMarkWorker.ptr()
2324 casgstatus(gp, _Gwaiting, _Grunnable)
2325 if trace.enabled {
2326 traceGoUnpark(gp, 0)
2327 }
2328 return gp, false
2329 }
2330
2331 delta := int64(-1)
2332 if pollUntil != 0 {
2333
2334 delta = pollUntil - now
2335 }
2336
2337
2338
2339
2340
2341 gp, otherReady := beforeIdle(delta)
2342 if gp != nil {
2343 casgstatus(gp, _Gwaiting, _Grunnable)
2344 if trace.enabled {
2345 traceGoUnpark(gp, 0)
2346 }
2347 return gp, false
2348 }
2349 if otherReady {
2350 goto top
2351 }
2352
2353
2354
2355
2356
2357 allpSnapshot := allp
2358
2359
2360 lock(&sched.lock)
2361 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2362 unlock(&sched.lock)
2363 goto top
2364 }
2365 if sched.runqsize != 0 {
2366 gp := globrunqget(_p_, 0)
2367 unlock(&sched.lock)
2368 return gp, false
2369 }
2370 if releasep() != _p_ {
2371 throw("findrunnable: wrong p")
2372 }
2373 pidleput(_p_)
2374 unlock(&sched.lock)
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389 wasSpinning := _g_.m.spinning
2390 if _g_.m.spinning {
2391 _g_.m.spinning = false
2392 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2393 throw("findrunnable: negative nmspinning")
2394 }
2395 }
2396
2397
2398 for _, _p_ := range allpSnapshot {
2399 if !runqempty(_p_) {
2400 lock(&sched.lock)
2401 _p_ = pidleget()
2402 unlock(&sched.lock)
2403 if _p_ != nil {
2404 acquirep(_p_)
2405 if wasSpinning {
2406 _g_.m.spinning = true
2407 atomic.Xadd(&sched.nmspinning, 1)
2408 }
2409 goto top
2410 }
2411 break
2412 }
2413 }
2414
2415
2416 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2417 lock(&sched.lock)
2418 _p_ = pidleget()
2419 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2420 pidleput(_p_)
2421 _p_ = nil
2422 }
2423 unlock(&sched.lock)
2424 if _p_ != nil {
2425 acquirep(_p_)
2426 if wasSpinning {
2427 _g_.m.spinning = true
2428 atomic.Xadd(&sched.nmspinning, 1)
2429 }
2430
2431 goto stop
2432 }
2433 }
2434
2435
2436 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2437 atomic.Store64(&sched.pollUntil, uint64(pollUntil))
2438 if _g_.m.p != 0 {
2439 throw("findrunnable: netpoll with p")
2440 }
2441 if _g_.m.spinning {
2442 throw("findrunnable: netpoll with spinning")
2443 }
2444 if faketime != 0 {
2445
2446 delta = 0
2447 }
2448 list := netpoll(delta)
2449 atomic.Store64(&sched.pollUntil, 0)
2450 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2451 if faketime != 0 && list.empty() {
2452
2453
2454 stopm()
2455 goto top
2456 }
2457 lock(&sched.lock)
2458 _p_ = pidleget()
2459 unlock(&sched.lock)
2460 if _p_ == nil {
2461 injectglist(&list)
2462 } else {
2463 acquirep(_p_)
2464 if !list.empty() {
2465 gp := list.pop()
2466 injectglist(&list)
2467 casgstatus(gp, _Gwaiting, _Grunnable)
2468 if trace.enabled {
2469 traceGoUnpark(gp, 0)
2470 }
2471 return gp, false
2472 }
2473 if wasSpinning {
2474 _g_.m.spinning = true
2475 atomic.Xadd(&sched.nmspinning, 1)
2476 }
2477 goto top
2478 }
2479 } else if pollUntil != 0 && netpollinited() {
2480 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2481 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
2482 netpollBreak()
2483 }
2484 }
2485 stopm()
2486 goto top
2487 }
2488
2489
2490
2491
2492
2493 func pollWork() bool {
2494 if sched.runqsize != 0 {
2495 return true
2496 }
2497 p := getg().m.p.ptr()
2498 if !runqempty(p) {
2499 return true
2500 }
2501 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2502 if list := netpoll(0); !list.empty() {
2503 injectglist(&list)
2504 return true
2505 }
2506 }
2507 return false
2508 }
2509
2510
2511
2512
2513 func wakeNetPoller(when int64) {
2514 if atomic.Load64(&sched.lastpoll) == 0 {
2515
2516
2517
2518
2519 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2520 if pollerPollUntil == 0 || pollerPollUntil > when {
2521 netpollBreak()
2522 }
2523 }
2524 }
2525
2526 func resetspinning() {
2527 _g_ := getg()
2528 if !_g_.m.spinning {
2529 throw("resetspinning: not a spinning m")
2530 }
2531 _g_.m.spinning = false
2532 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2533 if int32(nmspinning) < 0 {
2534 throw("findrunnable: negative nmspinning")
2535 }
2536
2537
2538
2539 wakep()
2540 }
2541
2542
2543
2544
2545
2546
2547
2548
2549
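// injectglist adds each runnable G on the list to some run queue and clears
// glist. If there is no current P, they are added to the global queue and up
// to npidle M's are started to run them. Otherwise, for each idle P, one G is
// moved to the global queue and an M is started; remaining G's go to the
// current P's local run queue.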
2550 func injectglist(glist *gList) {
2551 if glist.empty() {
2552 return
2553 }
2554 if trace.enabled {
2555 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
2556 traceGoUnpark(gp, 0)
2557 }
2558 }
2559
2560
2561
2562 head := glist.head.ptr()
2563 var tail *g
2564 qsize := 0
2565 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
2566 tail = gp
2567 qsize++
2568 casgstatus(gp, _Gwaiting, _Grunnable)
2569 }
2570
2571
2572 var q gQueue
2573 q.head.set(head)
2574 q.tail.set(tail)
2575 *glist = gList{}
2576
2577 startIdle := func(n int) {
2578 for ; n != 0 && sched.npidle != 0; n-- {
2579 startm(nil, false)
2580 }
2581 }
2582
2583 pp := getg().m.p.ptr()
2584 if pp == nil {
2585 lock(&sched.lock)
2586 globrunqputbatch(&q, int32(qsize))
2587 unlock(&sched.lock)
2588 startIdle(qsize)
2589 return
2590 }
2591
2592 lock(&sched.lock)
2593 npidle := int(sched.npidle)
2594 var n int
2595 for n = 0; n < npidle && !q.empty(); n++ {
2596 globrunqput(q.pop())
2597 }
2598 unlock(&sched.lock)
2599 startIdle(n)
2600 qsize -= n
2601
2602 if !q.empty() {
2603 runqputbatch(pp, &q, qsize)
2604 }
2605 }
2606
2607
2608
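// schedule runs one round of the scheduler: find a runnable goroutine and
// execute it. It never returns.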
2609 func schedule() {
2610 _g_ := getg()
2611
2612 if _g_.m.locks != 0 {
2613 throw("schedule: holding locks")
2614 }
2615
2616 if _g_.m.lockedg != 0 {
2617 stoplockedm()
2618 execute(_g_.m.lockedg.ptr(), false)
2619 }
2620
2621
2622
2623 if _g_.m.incgo {
2624 throw("schedule: in cgo")
2625 }
2626
2627 top:
2628 pp := _g_.m.p.ptr()
2629 pp.preempt = false
2630
2631 if sched.gcwaiting != 0 {
2632 gcstopm()
2633 goto top
2634 }
2635 if pp.runSafePointFn != 0 {
2636 runSafePointFn()
2637 }
2638
2639
2640
2641
2642 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
2643 throw("schedule: spinning with local work")
2644 }
2645
2646 checkTimers(pp, 0)
2647
2648 var gp *g
2649 var inheritTime bool
2650
2651
2652
2653
2654 tryWakeP := false
2655 if trace.enabled || trace.shutdown {
2656 gp = traceReader()
2657 if gp != nil {
2658 casgstatus(gp, _Gwaiting, _Grunnable)
2659 traceGoUnpark(gp, 0)
2660 tryWakeP = true
2661 }
2662 }
2663 if gp == nil && gcBlackenEnabled != 0 {
2664 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2665 tryWakeP = tryWakeP || gp != nil
2666 }
2667 if gp == nil {
2668
2669
2670
2671 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2672 lock(&sched.lock)
2673 gp = globrunqget(_g_.m.p.ptr(), 1)
2674 unlock(&sched.lock)
2675 }
2676 }
2677 if gp == nil {
2678 gp, inheritTime = runqget(_g_.m.p.ptr())
2679
2680
2681 }
2682 if gp == nil {
2683 gp, inheritTime = findrunnable()
2684 }
2685
2686
2687
2688
2689 if _g_.m.spinning {
2690 resetspinning()
2691 }
2692
2693 if sched.disable.user && !schedEnabled(gp) {
2694
2695
2696
2697 lock(&sched.lock)
2698 if schedEnabled(gp) {
2699
2700
2701 unlock(&sched.lock)
2702 } else {
2703 sched.disable.runnable.pushBack(gp)
2704 sched.disable.n++
2705 unlock(&sched.lock)
2706 goto top
2707 }
2708 }
2709
2710
2711
2712 if tryWakeP {
2713 wakep()
2714 }
2715 if gp.lockedm != 0 {
2716
2717
2718 startlockedm(gp)
2719 goto top
2720 }
2721
2722 execute(gp, inheritTime)
2723 }
2724
2725
2726
2727
2728
2729
2730
2731
2732 func dropg() {
2733 _g_ := getg()
2734
2735 setMNoWB(&_g_.m.curg.m, nil)
2736 setGNoWB(&_g_.m.curg, nil)
2737 }
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
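// checkTimers runs any timers on pp that are ready. now is the current time,
// or 0 if it is not yet known. It returns the (possibly updated) current
// time, the time the next timer should run (pollUntil, 0 if none), and
// whether it ran any timers.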
2748 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
2749
2750
2751 if atomic.Load(&pp.adjustTimers) == 0 {
2752 next := int64(atomic.Load64(&pp.timer0When))
2753 if next == 0 {
2754 return now, 0, false
2755 }
2756 if now == 0 {
2757 now = nanotime()
2758 }
2759 if now < next {
2760
2761
2762
2763
2764 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
2765 return now, next, false
2766 }
2767 }
2768 }
2769
2770 lock(&pp.timersLock)
2771
2772 adjusttimers(pp)
2773
2774 rnow = now
2775 if len(pp.timers) > 0 {
2776 if rnow == 0 {
2777 rnow = nanotime()
2778 }
2779 for len(pp.timers) > 0 {
2780
2781
2782 if tw := runtimer(pp, rnow); tw != 0 {
2783 if tw > 0 {
2784 pollUntil = tw
2785 }
2786 break
2787 }
2788 ran = true
2789 }
2790 }
2791
2792
2793
2794
2795 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
2796 clearDeletedTimers(pp)
2797 }
2798
2799 unlock(&pp.timersLock)
2800
2801 return rnow, pollUntil, ran
2802 }
2803
2804
2805
2806
2807
2808 func shouldStealTimers(p2 *p) bool {
2809 if p2.status != _Prunning {
2810 return true
2811 }
2812 mp := p2.m.ptr()
2813 if mp == nil || mp.locks > 0 {
2814 return false
2815 }
2816 gp := mp.curg
2817 if gp == nil || gp.atomicstatus != _Grunning || !gp.preempt {
2818 return false
2819 }
2820 return true
2821 }
2822
2823 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2824 unlock((*mutex)(lock))
2825 return true
2826 }
2827
2828
2829 func park_m(gp *g) {
2830 _g_ := getg()
2831
2832 if trace.enabled {
2833 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2834 }
2835
2836 casgstatus(gp, _Grunning, _Gwaiting)
2837 dropg()
2838
2839 if fn := _g_.m.waitunlockf; fn != nil {
2840 ok := fn(gp, _g_.m.waitlock)
2841 _g_.m.waitunlockf = nil
2842 _g_.m.waitlock = nil
2843 if !ok {
2844 if trace.enabled {
2845 traceGoUnpark(gp, 2)
2846 }
2847 casgstatus(gp, _Gwaiting, _Grunnable)
2848 execute(gp, true)
2849 }
2850 }
2851 schedule()
2852 }
2853
2854 func goschedImpl(gp *g) {
2855 status := readgstatus(gp)
2856 if status&^_Gscan != _Grunning {
2857 dumpgstatus(gp)
2858 throw("bad g status")
2859 }
2860 casgstatus(gp, _Grunning, _Grunnable)
2861 dropg()
2862 lock(&sched.lock)
2863 globrunqput(gp)
2864 unlock(&sched.lock)
2865
2866 schedule()
2867 }
2868
2869
2870 func gosched_m(gp *g) {
2871 if trace.enabled {
2872 traceGoSched()
2873 }
2874 goschedImpl(gp)
2875 }
2876
2877
2878 func goschedguarded_m(gp *g) {
2879
2880 if !canPreemptM(gp.m) {
2881 gogo(&gp.sched)
2882 }
2883
2884 if trace.enabled {
2885 traceGoSched()
2886 }
2887 goschedImpl(gp)
2888 }
2889
2890 func gopreempt_m(gp *g) {
2891 if trace.enabled {
2892 traceGoPreempt()
2893 }
2894 goschedImpl(gp)
2895 }
2896
2897
2898
2899
2900 func preemptPark(gp *g) {
2901 if trace.enabled {
2902 traceGoPark(traceEvGoBlock, 0)
2903 }
2904 status := readgstatus(gp)
2905 if status&^_Gscan != _Grunning {
2906 dumpgstatus(gp)
2907 throw("bad g status")
2908 }
2909 gp.waitreason = waitReasonPreempted
2910
2911
2912
2913
2914
2915
2916 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
2917 dropg()
2918 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
2919 schedule()
2920 }
2921
2922
2923
2924
2925 func goyield() {
2926 checkTimeouts()
2927 mcall(goyield_m)
2928 }
2929
2930 func goyield_m(gp *g) {
2931 if trace.enabled {
2932 traceGoPreempt()
2933 }
2934 pp := gp.m.p.ptr()
2935 casgstatus(gp, _Grunning, _Grunnable)
2936 dropg()
2937 runqput(pp, gp, false)
2938 schedule()
2939 }
2940
2941
2942 func goexit1() {
2943 if raceenabled {
2944 racegoend()
2945 }
2946 if trace.enabled {
2947 traceGoEnd()
2948 }
2949 mcall(goexit0)
2950 }
2951
2952
2953 func goexit0(gp *g) {
2954 _g_ := getg()
2955
2956 casgstatus(gp, _Grunning, _Gdead)
2957 if isSystemGoroutine(gp, false) {
2958 atomic.Xadd(&sched.ngsys, -1)
2959 }
2960 gp.m = nil
2961 locked := gp.lockedm != 0
2962 gp.lockedm = 0
2963 _g_.m.lockedg = 0
2964 gp.preemptStop = false
2965 gp.paniconfault = false
2966 gp._defer = nil
2967 gp._panic = nil
2968 gp.writebuf = nil
2969 gp.waitreason = 0
2970 gp.param = nil
2971 gp.labels = nil
2972 gp.timer = nil
2973
2974 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2975
2976
2977
2978 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2979 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2980 gp.gcAssistBytes = 0
2981 }
2982
2983 dropg()
2984
2985 if GOARCH == "wasm" {
2986 gfput(_g_.m.p.ptr(), gp)
2987 schedule()
2988 }
2989
2990 if _g_.m.lockedInt != 0 {
2991 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2992 throw("internal lockOSThread error")
2993 }
2994 gfput(_g_.m.p.ptr(), gp)
2995 if locked {
2996
2997
2998
2999
3000
3001
3002 if GOOS != "plan9" {
3003 gogo(&_g_.m.g0.sched)
3004 } else {
3005
3006
3007 _g_.m.lockedExt = 0
3008 }
3009 }
3010 schedule()
3011 }
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021 func save(pc, sp uintptr) {
3022 _g_ := getg()
3023
3024 _g_.sched.pc = pc
3025 _g_.sched.sp = sp
3026 _g_.sched.lr = 0
3027 _g_.sched.ret = 0
3028 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
3029
3030
3031
3032 if _g_.sched.ctxt != nil {
3033 badctxt()
3034 }
3035 }
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074 func reentersyscall(pc, sp uintptr) {
3075 _g_ := getg()
3076
3077
3078
3079 _g_.m.locks++
3080
3081
3082
3083
3084
3085 _g_.stackguard0 = stackPreempt
3086 _g_.throwsplit = true
3087
3088
3089 save(pc, sp)
3090 _g_.syscallsp = sp
3091 _g_.syscallpc = pc
3092 casgstatus(_g_, _Grunning, _Gsyscall)
3093 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3094 systemstack(func() {
3095 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3096 throw("entersyscall")
3097 })
3098 }
3099
3100 if trace.enabled {
3101 systemstack(traceGoSysCall)
3102
3103
3104
3105 save(pc, sp)
3106 }
3107
3108 if atomic.Load(&sched.sysmonwait) != 0 {
3109 systemstack(entersyscall_sysmon)
3110 save(pc, sp)
3111 }
3112
3113 if _g_.m.p.ptr().runSafePointFn != 0 {
3114
3115 systemstack(runSafePointFn)
3116 save(pc, sp)
3117 }
3118
3119 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3120 _g_.sysblocktraced = true
3121 pp := _g_.m.p.ptr()
3122 pp.m = 0
3123 _g_.m.oldp.set(pp)
3124 _g_.m.p = 0
3125 atomic.Store(&pp.status, _Psyscall)
3126 if sched.gcwaiting != 0 {
3127 systemstack(entersyscall_gcwait)
3128 save(pc, sp)
3129 }
3130
3131 _g_.m.locks--
3132 }
3133
3134
3135
3136
3137
3138
3139
3140 func entersyscall() {
3141 reentersyscall(getcallerpc(), getcallersp())
3142 }
3143
3144 func entersyscall_sysmon() {
3145 lock(&sched.lock)
3146 if atomic.Load(&sched.sysmonwait) != 0 {
3147 atomic.Store(&sched.sysmonwait, 0)
3148 notewakeup(&sched.sysmonnote)
3149 }
3150 unlock(&sched.lock)
3151 }
3152
3153 func entersyscall_gcwait() {
3154 _g_ := getg()
3155 _p_ := _g_.m.oldp.ptr()
3156
3157 lock(&sched.lock)
3158 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
3159 if trace.enabled {
3160 traceGoSysBlock(_p_)
3161 traceProcStop(_p_)
3162 }
3163 _p_.syscalltick++
3164 if sched.stopwait--; sched.stopwait == 0 {
3165 notewakeup(&sched.stopnote)
3166 }
3167 }
3168 unlock(&sched.lock)
3169 }
3170
3171
3172
3173 func entersyscallblock() {
3174 _g_ := getg()
3175
3176 _g_.m.locks++
3177 _g_.throwsplit = true
3178 _g_.stackguard0 = stackPreempt
3179 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3180 _g_.sysblocktraced = true
3181 _g_.m.p.ptr().syscalltick++
3182
3183
3184 pc := getcallerpc()
3185 sp := getcallersp()
3186 save(pc, sp)
3187 _g_.syscallsp = _g_.sched.sp
3188 _g_.syscallpc = _g_.sched.pc
3189 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3190 sp1 := sp
3191 sp2 := _g_.sched.sp
3192 sp3 := _g_.syscallsp
3193 systemstack(func() {
3194 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3195 throw("entersyscallblock")
3196 })
3197 }
3198 casgstatus(_g_, _Grunning, _Gsyscall)
3199 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3200 systemstack(func() {
3201 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3202 throw("entersyscallblock")
3203 })
3204 }
3205
3206 systemstack(entersyscallblock_handoff)
3207
3208
3209 save(getcallerpc(), getcallersp())
3210
3211 _g_.m.locks--
3212 }
3213
3214 func entersyscallblock_handoff() {
3215 if trace.enabled {
3216 traceGoSysCall()
3217 traceGoSysBlock(getg().m.p.ptr())
3218 }
3219 handoffp(releasep())
3220 }
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
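// exitsyscall is called when the goroutine g exits its system call, to
// arrange for it to run on a CPU again. It first tries the fast path of
// reacquiring the old P or an idle P without rescheduling; otherwise it parks
// via exitsyscall0. Write barriers are not allowed because our P may have
// been stolen.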
3234 func exitsyscall() {
3235 _g_ := getg()
3236
3237 _g_.m.locks++
3238 if getcallersp() > _g_.syscallsp {
3239 throw("exitsyscall: syscall frame is no longer valid")
3240 }
3241
3242 _g_.waitsince = 0
3243 oldp := _g_.m.oldp.ptr()
3244 _g_.m.oldp = 0
3245 if exitsyscallfast(oldp) {
3246 if trace.enabled {
3247 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3248 systemstack(traceGoStart)
3249 }
3250 }
3251
3252 _g_.m.p.ptr().syscalltick++
3253
3254 casgstatus(_g_, _Gsyscall, _Grunning)
3255
3256
3257
3258 _g_.syscallsp = 0
3259 _g_.m.locks--
3260 if _g_.preempt {
3261
3262 _g_.stackguard0 = stackPreempt
3263 } else {
3264
3265 _g_.stackguard0 = _g_.stack.lo + _StackGuard
3266 }
3267 _g_.throwsplit = false
3268
3269 if sched.disable.user && !schedEnabled(_g_) {
3270
3271 Gosched()
3272 }
3273
3274 return
3275 }
3276
3277 _g_.sysexitticks = 0
3278 if trace.enabled {
3279
3280
3281 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3282 osyield()
3283 }
3284
3285
3286
3287
3288 _g_.sysexitticks = cputicks()
3289 }
3290
3291 _g_.m.locks--
3292
3293
3294 mcall(exitsyscall0)
3295
3296
3297
3298
3299
3300
3301
3302 _g_.syscallsp = 0
3303 _g_.m.p.ptr().syscalltick++
3304 _g_.throwsplit = false
3305 }
3306
3307
3308 func exitsyscallfast(oldp *p) bool {
3309 _g_ := getg()
3310
3311
3312 if sched.stopwait == freezeStopWait {
3313 return false
3314 }
3315
3316
3317 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
3318
3319 wirep(oldp)
3320 exitsyscallfast_reacquired()
3321 return true
3322 }
3323
3324
3325 if sched.pidle != 0 {
3326 var ok bool
3327 systemstack(func() {
3328 ok = exitsyscallfast_pidle()
3329 if ok && trace.enabled {
3330 if oldp != nil {
3331
3332
3333 for oldp.syscalltick == _g_.m.syscalltick {
3334 osyield()
3335 }
3336 }
3337 traceGoSysExit(0)
3338 }
3339 })
3340 if ok {
3341 return true
3342 }
3343 }
3344 return false
3345 }
3346
3347
3348
3349 // exitsyscallfast_reacquired is the fast path taken when the goroutine
3350 // got back the P it was running on before the syscall. If sysmon took
3351 // the P away and back in the meantime, the trace needs a SysBlock/SysExit pair.
3352 func exitsyscallfast_reacquired() {
3353 _g_ := getg()
3354 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3355 if trace.enabled {
3356
3357
3358
3359 systemstack(func() {
3360
3361 traceGoSysBlock(_g_.m.p.ptr())
3362
3363 traceGoSysExit(0)
3364 })
3365 }
3366 _g_.m.p.ptr().syscalltick++
3367 }
3368 }
3369
3370 func exitsyscallfast_pidle() bool {
3371 lock(&sched.lock)
3372 _p_ := pidleget()
3373 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3374 atomic.Store(&sched.sysmonwait, 0)
3375 notewakeup(&sched.sysmonnote)
3376 }
3377 unlock(&sched.lock)
3378 if _p_ != nil {
3379 acquirep(_p_)
3380 return true
3381 }
3382 return false
3383 }
3384
3385
3386 // exitsyscall0 is the slow path of exitsyscall, running on g0:
3387 // no P could be acquired on the fast path, so gp is made runnable and
3388 // queued, and this M looks for other work or parks.
3389 func exitsyscall0(gp *g) {
3390 _g_ := getg()
3391
3392 casgstatus(gp, _Gsyscall, _Grunnable)
3393 dropg()
3394 lock(&sched.lock)
3395 var _p_ *p
3396 if schedEnabled(_g_) {
3397 _p_ = pidleget()
3398 }
3399 if _p_ == nil {
3400 globrunqput(gp)
3401 } else if atomic.Load(&sched.sysmonwait) != 0 {
3402 atomic.Store(&sched.sysmonwait, 0)
3403 notewakeup(&sched.sysmonnote)
3404 }
3405 unlock(&sched.lock)
3406 if _p_ != nil {
3407 acquirep(_p_)
3408 execute(gp, false)
3409 }
3410 if _g_.m.lockedg != 0 {
3411
3412 stoplockedm()
3413 execute(gp, false)
3414 }
3415 stopm()
3416 schedule()
3417 }
3418
3419 func beforefork() {
3420 gp := getg().m.curg
3421
3422
3423
3424
3425 gp.m.locks++
3426 sigsave(&gp.m.sigmask)
3427 sigblock()
3428
3429
3430
3431
3432
3433 gp.stackguard0 = stackFork
3434 }
3435
3436
3437
3438
3439 func syscall_runtime_BeforeFork() {
3440 systemstack(beforefork)
3441 }
3442
3443 func afterfork() {
3444 gp := getg().m.curg
3445
3446
3447 gp.stackguard0 = gp.stack.lo + _StackGuard
3448
3449 msigrestore(gp.m.sigmask)
3450
3451 gp.m.locks--
3452 }
3453
3454
3455
3456
3457 func syscall_runtime_AfterFork() {
3458 systemstack(afterfork)
3459 }
3460
3461
3462
3463 var inForkedChild bool
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476 func syscall_runtime_AfterForkInChild() {
3477
3478
3479
3480
3481 inForkedChild = true
3482
3483 clearSignalHandlers()
3484
3485
3486
3487 msigrestore(getg().m.sigmask)
3488
3489 inForkedChild = false
3490 }
3491
3492
3493
3494
3495 var pendingPreemptSignals uint32
3496
3497
3498
3499 func syscall_runtime_BeforeExec() {
3500
3501 execLock.lock()
3502
3503
3504
3505 if GOOS == "darwin" {
3506 for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
3507 osyield()
3508 }
3509 }
3510 }
3511
3512
3513
3514 func syscall_runtime_AfterExec() {
3515 execLock.unlock()
3516 }
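// Editor's sketch (assumed call sequence; see the syscall package for the
// real code): these hooks are reached via go:linkname around fork/exec,
// roughly:
//
//	syscall_runtime_BeforeFork()           // block signals, forbid stack growth
//	pid := rawFork()                       // placeholder for the raw clone/fork
//	if pid == 0 {
//		syscall_runtime_AfterForkInChild() // child: clear handlers, restore mask
//	} else {
//		syscall_runtime_AfterFork()        // parent: restore signal mask
//	}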
3517
3518 // malg allocates a new g with a stack big enough for stacksize bytes.
3519 func malg(stacksize int32) *g {
3520 newg := new(g)
3521 if stacksize >= 0 {
3522 stacksize = round2(_StackSystem + stacksize)
3523 systemstack(func() {
3524 newg.stack = stackalloc(uint32(stacksize))
3525 })
3526 newg.stackguard0 = newg.stack.lo + _StackGuard
3527 newg.stackguard1 = ^uintptr(0)
3528
3529
3530 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
3531 }
3532 return newg
3533 }
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545 // newproc creates a new g running fn with siz bytes of arguments and
3546 // puts it on the current P's run queue.
3547 // The compiler turns a go statement into a call to this function.
3548 // The argument block lives on the caller's stack immediately after fn,
3549 // which is why argp is computed from the address of fn.
3550 func newproc(siz int32, fn *funcval) {
3551 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
3552 gp := getg()
3553 pc := getcallerpc()
3554 systemstack(func() {
3555 newg := newproc1(fn, argp, siz, gp, pc)
3556
3557 _p_ := getg().m.p.ptr()
3558 runqput(_p_, newg, true)
3559
3560 if mainStarted {
3561 wakep()
3562 }
3563 })
3564 }
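// Editor's sketch: the compiler lowers a go statement into a call to
// newproc. For example,
//
//	go f(x, y)
//
// becomes, roughly,
//
//	newproc(siz, fnval) // fnval is the *funcval for f; x and y are copied
//	                    // onto the caller's stack immediately after fnval
//
// siz and fnval are illustrative names, not identifiers emitted by the compiler.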
3565
3566
3567
3568
3569
3570
3571 // newproc1 creates a new g in state _Grunnable, starting at fn and with
3572 // narg bytes of arguments starting at argp. callerpc is the address of
3573 // the go statement that created it. The caller is responsible for
3574 // putting the new g on a run queue.
3575 func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
3576 _g_ := getg()
3577
3578 if fn == nil {
3579 _g_.m.throwing = -1
3580 throw("go of nil func value")
3581 }
3582 acquirem()
3583 siz := narg
3584 siz = (siz + 7) &^ 7
3585
3586
3587
3588
3589
3590 if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
3591 throw("newproc: function arguments too large for new goroutine")
3592 }
3593
3594 _p_ := _g_.m.p.ptr()
3595 newg := gfget(_p_)
3596 if newg == nil {
3597 newg = malg(_StackMin)
3598 casgstatus(newg, _Gidle, _Gdead)
3599 allgadd(newg)
3600 }
3601 if newg.stack.hi == 0 {
3602 throw("newproc1: newg missing stack")
3603 }
3604
3605 if readgstatus(newg) != _Gdead {
3606 throw("newproc1: new g is not Gdead")
3607 }
3608
3609 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize
3610 totalSize += -totalSize & (sys.SpAlign - 1)
3611 sp := newg.stack.hi - totalSize
3612 spArg := sp
3613 if usesLR {
3614
3615 *(*uintptr)(unsafe.Pointer(sp)) = 0
3616 prepGoExitFrame(sp)
3617 spArg += sys.MinFrameSize
3618 }
3619 if narg > 0 {
3620 memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
3621
3622
3623
3624
3625
3626
3627 if writeBarrier.needed && !_g_.m.curg.gcscandone {
3628 f := findfunc(fn.fn)
3629 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
3630 if stkmap.nbit > 0 {
3631
3632 bv := stackmapdata(stkmap, 0)
3633 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
3634 }
3635 }
3636 }
3637
3638 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
3639 newg.sched.sp = sp
3640 newg.stktopsp = sp
3641 newg.sched.pc = funcPC(goexit) + sys.PCQuantum
3642 newg.sched.g = guintptr(unsafe.Pointer(newg))
3643 gostartcallfn(&newg.sched, fn)
3644 newg.gopc = callerpc
3645 newg.ancestors = saveAncestors(callergp)
3646 newg.startpc = fn.fn
3647 if _g_.m.curg != nil {
3648 newg.labels = _g_.m.curg.labels
3649 }
3650 if isSystemGoroutine(newg, false) {
3651 atomic.Xadd(&sched.ngsys, +1)
3652 }
3653 casgstatus(newg, _Gdead, _Grunnable)
3654
3655 if _p_.goidcache == _p_.goidcacheend {
3656
3657
3658
3659 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3660 _p_.goidcache -= _GoidCacheBatch - 1
3661 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3662 }
3663 newg.goid = int64(_p_.goidcache)
3664 _p_.goidcache++
3665 if raceenabled {
3666 newg.racectx = racegostart(callerpc)
3667 }
3668 if trace.enabled {
3669 traceGoCreate(newg, newg.startpc)
3670 }
3671 releasem(_g_.m)
3672
3673 return newg
3674 }
3675
3676
3677 // saveAncestors records the goroutine-creation tracebacks of callergp's
3678 // ancestors (plus callergp itself) when GODEBUG=tracebackancestors is set.
3679 func saveAncestors(callergp *g) *[]ancestorInfo {
3680
3681 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
3682 return nil
3683 }
3684 var callerAncestors []ancestorInfo
3685 if callergp.ancestors != nil {
3686 callerAncestors = *callergp.ancestors
3687 }
3688 n := int32(len(callerAncestors)) + 1
3689 if n > debug.tracebackancestors {
3690 n = debug.tracebackancestors
3691 }
3692 ancestors := make([]ancestorInfo, n)
3693 copy(ancestors[1:], callerAncestors)
3694
3695 var pcs [_TracebackMaxFrames]uintptr
3696 npcs := gcallers(callergp, 0, pcs[:])
3697 ipcs := make([]uintptr, npcs)
3698 copy(ipcs, pcs[:])
3699 ancestors[0] = ancestorInfo{
3700 pcs: ipcs,
3701 goid: callergp.goid,
3702 gopc: callergp.gopc,
3703 }
3704
3705 ancestorsp := new([]ancestorInfo)
3706 *ancestorsp = ancestors
3707 return ancestorsp
3708 }
3709
3710 // gfput puts a dead g on _p_'s free list; if the local list grows too
3711 // long, a batch is moved to the global free list.
3712 func gfput(_p_ *p, gp *g) {
3713 if readgstatus(gp) != _Gdead {
3714 throw("gfput: bad status (not Gdead)")
3715 }
3716
3717 stksize := gp.stack.hi - gp.stack.lo
3718
3719 if stksize != _FixedStack {
3720
3721 stackfree(gp.stack)
3722 gp.stack.lo = 0
3723 gp.stack.hi = 0
3724 gp.stackguard0 = 0
3725 }
3726
3727 _p_.gFree.push(gp)
3728 _p_.gFree.n++
3729 if _p_.gFree.n >= 64 {
3730 lock(&sched.gFree.lock)
3731 for _p_.gFree.n >= 32 {
3732 _p_.gFree.n--
3733 gp = _p_.gFree.pop()
3734 if gp.stack.lo == 0 {
3735 sched.gFree.noStack.push(gp)
3736 } else {
3737 sched.gFree.stack.push(gp)
3738 }
3739 sched.gFree.n++
3740 }
3741 unlock(&sched.gFree.lock)
3742 }
3743 }
3744
3745 // gfget gets a g from _p_'s free list, refilling from the global free
3746 // list if the local one is empty. It returns nil if nothing is cached.
3747 func gfget(_p_ *p) *g {
3748 retry:
3749 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
3750 lock(&sched.gFree.lock)
3751
3752 for _p_.gFree.n < 32 {
3753
3754 gp := sched.gFree.stack.pop()
3755 if gp == nil {
3756 gp = sched.gFree.noStack.pop()
3757 if gp == nil {
3758 break
3759 }
3760 }
3761 sched.gFree.n--
3762 _p_.gFree.push(gp)
3763 _p_.gFree.n++
3764 }
3765 unlock(&sched.gFree.lock)
3766 goto retry
3767 }
3768 gp := _p_.gFree.pop()
3769 if gp == nil {
3770 return nil
3771 }
3772 _p_.gFree.n--
3773 if gp.stack.lo == 0 {
3774
3775 systemstack(func() {
3776 gp.stack = stackalloc(_FixedStack)
3777 })
3778 gp.stackguard0 = gp.stack.lo + _StackGuard
3779 } else {
3780 if raceenabled {
3781 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3782 }
3783 if msanenabled {
3784 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3785 }
3786 }
3787 return gp
3788 }
3789
3790 // gfpurge moves all of _p_'s cached free g's to the global list.
3791 func gfpurge(_p_ *p) {
3792 lock(&sched.gFree.lock)
3793 for !_p_.gFree.empty() {
3794 gp := _p_.gFree.pop()
3795 _p_.gFree.n--
3796 if gp.stack.lo == 0 {
3797 sched.gFree.noStack.push(gp)
3798 } else {
3799 sched.gFree.stack.push(gp)
3800 }
3801 sched.gFree.n++
3802 }
3803 unlock(&sched.gFree.lock)
3804 }
3805
3806 // Breakpoint executes a breakpoint trap.
3807 func Breakpoint() {
3808 breakpoint()
3809 }
3810
3811
3812 // dolockOSThread is called by LockOSThread and lockOSThread below after
3813 // the m's lock counts have been updated; it wires the current g and m
3814 // to each other. Preemption must not occur during this call.
3815 func dolockOSThread() {
3816 if GOARCH == "wasm" {
3817 return
3818 }
3819 _g_ := getg()
3820 _g_.m.lockedg.set(_g_)
3821 _g_.lockedm.set(_g_.m)
3822 }
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834 // LockOSThread wires the calling goroutine to its current operating
3835 // system thread. The calling goroutine will always execute in that
3836 // thread, and no other goroutine will execute in it, until the calling
3837 // goroutine has made as many calls to UnlockOSThread as to
3838 // LockOSThread. If the calling goroutine exits without unlocking the
3839 // thread, the thread will be terminated.
3840 func LockOSThread() {
3841 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3842
3843
3844
3845 startTemplateThread()
3846 }
3847 _g_ := getg()
3848 _g_.m.lockedExt++
3849 if _g_.m.lockedExt == 0 {
3850 _g_.m.lockedExt--
3851 panic("LockOSThread nesting overflow")
3852 }
3853 dolockOSThread()
3854 }
3855
3856
3857 func lockOSThread() {
3858 getg().m.lockedInt++
3859 dolockOSThread()
3860 }
3861
3862
3863 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3864 // after the m's lock counts have been updated; it unwires the g and m
3865 // only once both counts are zero. Preemption must not occur here.
3866 func dounlockOSThread() {
3867 if GOARCH == "wasm" {
3868 return
3869 }
3870 _g_ := getg()
3871 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3872 return
3873 }
3874 _g_.m.lockedg = 0
3875 _g_.lockedm = 0
3876 }
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887 // UnlockOSThread undoes an earlier call to LockOSThread.
3888 // If this drops the number of active LockOSThread calls on the calling
3889 // goroutine to zero, it unwires the calling goroutine from its fixed
3890 // operating system thread. If there are no active LockOSThread calls,
3891 // this is a no-op.
3892 func UnlockOSThread() {
3893 _g_ := getg()
3894 if _g_.m.lockedExt == 0 {
3895 return
3896 }
3897 _g_.m.lockedExt--
3898 dounlockOSThread()
3899 }
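// Editor's example (standalone program, not runtime code): LockOSThread is
// typically used when a C library or OS facility requires that all of its
// calls come from one and the same thread:
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			runtime.LockOSThread()
//			defer runtime.UnlockOSThread()
//			// Calls that must stay on a single OS thread go here.
//			close(done)
//		}()
//		<-done
//	}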
3900
3901
3902 func unlockOSThread() {
3903 _g_ := getg()
3904 if _g_.m.lockedInt == 0 {
3905 systemstack(badunlockosthread)
3906 }
3907 _g_.m.lockedInt--
3908 dounlockOSThread()
3909 }
3910
3911 func badunlockosthread() {
3912 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3913 }
3914
3915 func gcount() int32 {
3916 n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
3917 for _, _p_ := range allp {
3918 n -= _p_.gFree.n
3919 }
3920
3921
3922
3923 if n < 1 {
3924 n = 1
3925 }
3926 return n
3927 }
3928
3929 func mcount() int32 {
3930 return int32(sched.mnext - sched.nmfreed)
3931 }
3932
3933 var prof struct {
3934 signalLock uint32
3935 hz int32
3936 }
3937
3938 func _System() { _System() }
3939 func _ExternalCode() { _ExternalCode() }
3940 func _LostExternalCode() { _LostExternalCode() }
3941 func _GC() { _GC() }
3942 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3943 func _VDSO() { _VDSO() }
3944
3945
3946 // sigprof is called on SIGPROF to record a CPU profile sample for the
3947 // interrupted code. It is called from the signal handler and may run
3948 // during a stop-the-world.
3948 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
3949 if prof.hz == 0 {
3950 return
3951 }
3952
3953
3954
3955
3956
3957
3958
3959 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
3960 if f := findfunc(pc); f.valid() {
3961 if hasPrefix(funcname(f), "runtime/internal/atomic") {
3962 cpuprof.lostAtomic++
3963 return
3964 }
3965 }
3966 }
3967
3968
3969
3970
3971
3972
3973
3974 getg().m.mallocing++
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041 traceback := true
4042 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) {
4043 traceback = false
4044 }
4045 var stk [maxCPUProfStack]uintptr
4046 n := 0
4047 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4048 cgoOff := 0
4049
4050
4051
4052
4053
4054 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4055 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4056 cgoOff++
4057 }
4058 copy(stk[:], mp.cgoCallers[:cgoOff])
4059 mp.cgoCallers[0] = 0
4060 }
4061
4062
4063 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4064 if n > 0 {
4065 n += cgoOff
4066 }
4067 } else if traceback {
4068 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4069 }
4070
4071 if n <= 0 {
4072
4073
4074 n = 0
4075 if (GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "darwin" || GOOS == "aix") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4076
4077
4078 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
4079 }
4080 if n == 0 && mp != nil && mp.vdsoSP != 0 {
4081 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4082 }
4083 if n == 0 {
4084
4085 n = 2
4086 if inVDSOPage(pc) {
4087 pc = funcPC(_VDSO) + sys.PCQuantum
4088 } else if pc > firstmoduledata.etext {
4089
4090 pc = funcPC(_ExternalCode) + sys.PCQuantum
4091 }
4092 stk[0] = pc
4093 if mp.preemptoff != "" {
4094 stk[1] = funcPC(_GC) + sys.PCQuantum
4095 } else {
4096 stk[1] = funcPC(_System) + sys.PCQuantum
4097 }
4098 }
4099 }
4100
4101 if prof.hz != 0 {
4102 cpuprof.add(gp, stk[:n])
4103 }
4104 getg().m.mallocing--
4105 }
4106
4107
4108 // If SIGPROF arrives on a non-Go thread, the signal handler collects a
4109 // traceback into sigprofCallers; sigprofCallersUse is non-zero while it is in use.
4110 var sigprofCallers cgoCallers
4111 var sigprofCallersUse uint32
4112
4113
4114
4115 // sigprofNonGo is called when a SIGPROF arrives on a non-Go thread and
4116 // the signal handler has collected a traceback into sigprofCallers
4117 // (sigprofCallersUse is non-zero for the duration). It records the
4118 // sample and then releases sigprofCallers.
4119 func sigprofNonGo() {
4120 if prof.hz != 0 {
4121 n := 0
4122 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
4123 n++
4124 }
4125 cpuprof.addNonGo(sigprofCallers[:n])
4126 }
4127
4128 atomic.Store(&sigprofCallersUse, 0)
4129 }
4130
4131
4132
4133 // sigprofNonGoPC is called when a SIGPROF arrives on a non-Go thread
4134 // and only a single PC value is available rather than a full traceback.
4135 // It records a two-frame sample: pc plus _ExternalCode.
4136 func sigprofNonGoPC(pc uintptr) {
4137 if prof.hz != 0 {
4138 stk := []uintptr{
4139 pc,
4140 funcPC(_ExternalCode) + sys.PCQuantum,
4141 }
4142 cpuprof.addNonGo(stk)
4143 }
4144 }
4145
4146
4147
4148
4149
4150
4151
4152 // setsSP reports whether the function starting at pc sets SP to an
4153 // absolute value (gogo, systemstack, mcall, morestack). It is unsafe to
4154 // traceback through such a frame from a profiling signal, since the
4155 // saved SP may not correspond to the saved PC.
4156 func setsSP(pc uintptr) bool {
4157 f := findfunc(pc)
4158 if !f.valid() {
4159
4160
4161 return true
4162 }
4163 switch f.funcID {
4164 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack:
4165 return true
4166 }
4167 return false
4168 }
4169
4170 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
4171 // If hz <= 0, it turns off CPU profiling.
4172 func setcpuprofilerate(hz int32) {
4173
4174 if hz < 0 {
4175 hz = 0
4176 }
4177
4178
4179
4180 _g_ := getg()
4181 _g_.m.locks++
4182
4183
4184
4185
4186 setThreadCPUProfiler(0)
4187
4188 for !atomic.Cas(&prof.signalLock, 0, 1) {
4189 osyield()
4190 }
4191 if prof.hz != hz {
4192 setProcessCPUProfiler(hz)
4193 prof.hz = hz
4194 }
4195 atomic.Store(&prof.signalLock, 0)
4196
4197 lock(&sched.lock)
4198 sched.profilehz = hz
4199 unlock(&sched.lock)
4200
4201 if hz != 0 {
4202 setThreadCPUProfiler(hz)
4203 }
4204
4205 _g_.m.locks--
4206 }
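// Editor's example: setcpuprofilerate is reached from user code through
// runtime.SetCPUProfileRate, most commonly via runtime/pprof:
//
//	package main
//
//	import (
//		"os"
//		"runtime/pprof"
//	)
//
//	func main() {
//		f, err := os.Create("cpu.prof")
//		if err != nil {
//			panic(err)
//		}
//		if err := pprof.StartCPUProfile(f); err != nil { // SIGPROF at the default 100 Hz
//			panic(err)
//		}
//		defer pprof.StopCPUProfile()
//		// ... workload to be profiled ...
//	}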
4207
4208 // init initializes pp, which may be freshly allocated or a previously
4209 // destroyed p, and transitions it to status _Pgcstop.
4210 func (pp *p) init(id int32) {
4211 pp.id = id
4212 pp.status = _Pgcstop
4213 pp.sudogcache = pp.sudogbuf[:0]
4214 for i := range pp.deferpool {
4215 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4216 }
4217 pp.wbBuf.reset()
4218 if pp.mcache == nil {
4219 if id == 0 {
4220 if mcache0 == nil {
4221 throw("missing mcache?")
4222 }
4223
4224
4225 pp.mcache = mcache0
4226 } else {
4227 pp.mcache = allocmcache()
4228 }
4229 }
4230 if raceenabled && pp.raceprocctx == 0 {
4231 if id == 0 {
4232 pp.raceprocctx = raceprocctx0
4233 raceprocctx0 = 0
4234 } else {
4235 pp.raceprocctx = raceproccreate()
4236 }
4237 }
4238 lockInit(&pp.timersLock, lockRankTimers)
4239 }
4240
4241
4242 // destroy releases all resources owned by pp (run queue, timers,
4243 // caches) and transitions it to _Pdead. sched.lock must be held and
4244 // the world must be stopped.
4245 func (pp *p) destroy() {
4246
4247 for pp.runqhead != pp.runqtail {
4248
4249 pp.runqtail--
4250 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4251
4252 globrunqputhead(gp)
4253 }
4254 if pp.runnext != 0 {
4255 globrunqputhead(pp.runnext.ptr())
4256 pp.runnext = 0
4257 }
4258 if len(pp.timers) > 0 {
4259 plocal := getg().m.p.ptr()
4260
4261
4262
4263
4264 lock(&plocal.timersLock)
4265 lock(&pp.timersLock)
4266 moveTimers(plocal, pp.timers)
4267 pp.timers = nil
4268 pp.numTimers = 0
4269 pp.adjustTimers = 0
4270 pp.deletedTimers = 0
4271 atomic.Store64(&pp.timer0When, 0)
4272 unlock(&pp.timersLock)
4273 unlock(&plocal.timersLock)
4274 }
4275
4276
4277 if gp := pp.gcBgMarkWorker.ptr(); gp != nil {
4278 casgstatus(gp, _Gwaiting, _Grunnable)
4279 if trace.enabled {
4280 traceGoUnpark(gp, 0)
4281 }
4282 globrunqput(gp)
4283
4284
4285 pp.gcBgMarkWorker.set(nil)
4286 }
4287
4288 if gcphase != _GCoff {
4289 wbBufFlush1(pp)
4290 pp.gcw.dispose()
4291 }
4292 for i := range pp.sudogbuf {
4293 pp.sudogbuf[i] = nil
4294 }
4295 pp.sudogcache = pp.sudogbuf[:0]
4296 for i := range pp.deferpool {
4297 for j := range pp.deferpoolbuf[i] {
4298 pp.deferpoolbuf[i][j] = nil
4299 }
4300 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4301 }
4302 systemstack(func() {
4303 for i := 0; i < pp.mspancache.len; i++ {
4304
4305 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4306 }
4307 pp.mspancache.len = 0
4308 pp.pcache.flush(&mheap_.pages)
4309 })
4310 freemcache(pp.mcache)
4311 pp.mcache = nil
4312 gfpurge(pp)
4313 traceProcFree(pp)
4314 if raceenabled {
4315 if pp.timerRaceCtx != 0 {
4316
4317
4318
4319
4320
4321 mp := getg().m
4322 phold := mp.p.ptr()
4323 mp.p.set(pp)
4324
4325 racectxend(pp.timerRaceCtx)
4326 pp.timerRaceCtx = 0
4327
4328 mp.p.set(phold)
4329 }
4330 raceprocdestroy(pp.raceprocctx)
4331 pp.raceprocctx = 0
4332 }
4333 pp.gcAssistTime = 0
4334 pp.status = _Pdead
4335 }
4336
4337
4338 // procresize changes the number of processors to nprocs. The world is
4339 // stopped and sched.lock is held. It returns the list of Ps that have
4340 // local work; the caller must schedule them.
4341 func procresize(nprocs int32) *p {
4342 old := gomaxprocs
4343 if old < 0 || nprocs <= 0 {
4344 throw("procresize: invalid arg")
4345 }
4346 if trace.enabled {
4347 traceGomaxprocs(nprocs)
4348 }
4349
4350
4351 now := nanotime()
4352 if sched.procresizetime != 0 {
4353 sched.totaltime += int64(old) * (now - sched.procresizetime)
4354 }
4355 sched.procresizetime = now
4356
4357
4358 if nprocs > int32(len(allp)) {
4359
4360
4361 lock(&allpLock)
4362 if nprocs <= int32(cap(allp)) {
4363 allp = allp[:nprocs]
4364 } else {
4365 nallp := make([]*p, nprocs)
4366
4367
4368 copy(nallp, allp[:cap(allp)])
4369 allp = nallp
4370 }
4371 unlock(&allpLock)
4372 }
4373
4374
4375 for i := old; i < nprocs; i++ {
4376 pp := allp[i]
4377 if pp == nil {
4378 pp = new(p)
4379 }
4380 pp.init(i)
4381 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
4382 }
4383
4384 _g_ := getg()
4385 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
4386
4387 _g_.m.p.ptr().status = _Prunning
4388 _g_.m.p.ptr().mcache.prepareForSweep()
4389 } else {
4390
4391
4392
4393
4394
4395 if _g_.m.p != 0 {
4396 if trace.enabled {
4397
4398
4399
4400 traceGoSched()
4401 traceProcStop(_g_.m.p.ptr())
4402 }
4403 _g_.m.p.ptr().m = 0
4404 }
4405 _g_.m.p = 0
4406 p := allp[0]
4407 p.m = 0
4408 p.status = _Pidle
4409 acquirep(p)
4410 if trace.enabled {
4411 traceGoStart()
4412 }
4413 }
4414
4415
4416 mcache0 = nil
4417
4418
4419 for i := nprocs; i < old; i++ {
4420 p := allp[i]
4421 p.destroy()
4422
4423 }
4424
4425
4426 if int32(len(allp)) != nprocs {
4427 lock(&allpLock)
4428 allp = allp[:nprocs]
4429 unlock(&allpLock)
4430 }
4431
4432 var runnablePs *p
4433 for i := nprocs - 1; i >= 0; i-- {
4434 p := allp[i]
4435 if _g_.m.p.ptr() == p {
4436 continue
4437 }
4438 p.status = _Pidle
4439 if runqempty(p) {
4440 pidleput(p)
4441 } else {
4442 p.m.set(mget())
4443 p.link.set(runnablePs)
4444 runnablePs = p
4445 }
4446 }
4447 stealOrder.reset(uint32(nprocs))
4448 var int32p *int32 = &gomaxprocs
4449 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
4450 return runnablePs
4451 }
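// Editor's example: procresize is driven from user code by
// runtime.GOMAXPROCS, which stops the world, resizes the P set, and
// restarts it:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		prev := runtime.GOMAXPROCS(2)            // change the number of Ps
//		fmt.Println(prev, runtime.GOMAXPROCS(0)) // GOMAXPROCS(0) only queries
//	}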
4452
4453
4454
4455 // acquirep associates _p_ with the current m and marks it _Prunning.
4456 // Unlike wirep, it is allowed to have write barriers, because once the
4457 // P is wired the m can rely on it; it also prepares the P's mcache for
4458 // the current sweep generation.
4459 func acquirep(_p_ *p) {
4460
4461 wirep(_p_)
4462
4463
4464
4465
4466
4467 _p_.mcache.prepareForSweep()
4468
4469 if trace.enabled {
4470 traceProcStart()
4471 }
4472 }
4473
4474
4475
4476 // wirep is the first step of acquirep: it associates the current m with
4477 // _p_ and marks the P _Prunning. It is split out so that this part can
4478 // run without write barriers, since there is no valid P yet when it
4479 // starts.
4480 func wirep(_p_ *p) {
4481 _g_ := getg()
4482
4483 if _g_.m.p != 0 {
4484 throw("wirep: already in go")
4485 }
4486 if _p_.m != 0 || _p_.status != _Pidle {
4487 id := int64(0)
4488 if _p_.m != 0 {
4489 id = _p_.m.ptr().id
4490 }
4491 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
4492 throw("wirep: invalid p state")
4493 }
4494 _g_.m.p.set(_p_)
4495 _p_.m.set(_g_.m)
4496 _p_.status = _Prunning
4497 }
4498
4499 // releasep disassociates the current m from its p and returns that p.
4500 func releasep() *p {
4501 _g_ := getg()
4502
4503 if _g_.m.p == 0 {
4504 throw("releasep: invalid arg")
4505 }
4506 _p_ := _g_.m.p.ptr()
4507 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
4508 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
4509 throw("releasep: invalid p state")
4510 }
4511 if trace.enabled {
4512 traceProcStop(_g_.m.p.ptr())
4513 }
4514 _g_.m.p = 0
4515 _p_.m = 0
4516 _p_.status = _Pidle
4517 return _p_
4518 }
4519
4520 func incidlelocked(v int32) {
4521 lock(&sched.lock)
4522 sched.nmidlelocked += v
4523 if v > 0 {
4524 checkdead()
4525 }
4526 unlock(&sched.lock)
4527 }
4528
4529
4530 // checkdead reports a fatal error if no goroutine can make progress,
4531 // based on the number of running M's. sched.lock must be held.
4532 func checkdead() {
4533
4534
4535
4536 if islibrary || isarchive {
4537 return
4538 }
4539
4540
4541
4542
4543
4544 if panicking > 0 {
4545 return
4546 }
4547
4548
4549
4550
4551
4552 var run0 int32
4553 if !iscgo && cgoHasExtraM {
4554 mp := lockextra(true)
4555 haveExtraM := extraMCount > 0
4556 unlockextra(mp)
4557 if haveExtraM {
4558 run0 = 1
4559 }
4560 }
4561
4562 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
4563 if run > run0 {
4564 return
4565 }
4566 if run < 0 {
4567 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
4568 throw("checkdead: inconsistent counts")
4569 }
4570
4571 grunning := 0
4572 lock(&allglock)
4573 for i := 0; i < len(allgs); i++ {
4574 gp := allgs[i]
4575 if isSystemGoroutine(gp, false) {
4576 continue
4577 }
4578 s := readgstatus(gp)
4579 switch s &^ _Gscan {
4580 case _Gwaiting,
4581 _Gpreempted:
4582 grunning++
4583 case _Grunnable,
4584 _Grunning,
4585 _Gsyscall:
4586 unlock(&allglock)
4587 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
4588 throw("checkdead: runnable g")
4589 }
4590 }
4591 unlock(&allglock)
4592 if grunning == 0 {
4593 unlock(&sched.lock)
4594 throw("no goroutines (main called runtime.Goexit) - deadlock!")
4595 }
4596
4597
4598 if faketime != 0 {
4599 when, _p_ := timeSleepUntil()
4600 if _p_ != nil {
4601 faketime = when
4602 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
4603 if (*pp).ptr() == _p_ {
4604 *pp = _p_.link
4605 break
4606 }
4607 }
4608 mp := mget()
4609 if mp == nil {
4610
4611
4612 throw("checkdead: no m for timer")
4613 }
4614 mp.nextp.set(_p_)
4615 notewakeup(&mp.park)
4616 return
4617 }
4618 }
4619
4620
4621 for _, _p_ := range allp {
4622 if len(_p_.timers) > 0 {
4623 return
4624 }
4625 }
4626
4627 getg().m.throwing = -1
4628 unlock(&sched.lock)
4629 throw("all goroutines are asleep - deadlock!")
4630 }
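// Editor's example: checkdead is what produces the familiar fatal error
// "all goroutines are asleep - deadlock!". A minimal program that trips it:
//
//	package main
//
//	func main() {
//		ch := make(chan int)
//		<-ch // no other goroutine can ever send; every goroutine is blocked
//	}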
4631
4632
4633 // forcegcperiod is the maximum time in nanoseconds between garbage
4634 // collections. If this much time passes without a collection, sysmon
4635 // forces one to run. It is a variable only so tests can change it;
4636 // it normally never changes.
4637 var forcegcperiod int64 = 2 * 60 * 1e9
4638
4639
4640 // sysmon is the system monitor, started by runtime.main on its own M.
4641 // It always runs without a P, so write barriers are not allowed.
4642 func sysmon() {
4643 lock(&sched.lock)
4644 sched.nmsys++
4645 checkdead()
4646 unlock(&sched.lock)
4647
4648 lasttrace := int64(0)
4649 idle := 0
4650 delay := uint32(0)
4651 for {
4652 if idle == 0 {
4653 delay = 20
4654 } else if idle > 50 {
4655 delay *= 2
4656 }
4657 if delay > 10*1000 {
4658 delay = 10 * 1000
4659 }
4660 usleep(delay)
4661 now := nanotime()
4662 next, _ := timeSleepUntil()
4663 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
4664 lock(&sched.lock)
4665 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
4666 if next > now {
4667 atomic.Store(&sched.sysmonwait, 1)
4668 unlock(&sched.lock)
4669
4670
4671 sleep := forcegcperiod / 2
4672 if next-now < sleep {
4673 sleep = next - now
4674 }
4675 shouldRelax := sleep >= osRelaxMinNS
4676 if shouldRelax {
4677 osRelax(true)
4678 }
4679 notetsleep(&sched.sysmonnote, sleep)
4680 if shouldRelax {
4681 osRelax(false)
4682 }
4683 now = nanotime()
4684 next, _ = timeSleepUntil()
4685 lock(&sched.lock)
4686 atomic.Store(&sched.sysmonwait, 0)
4687 noteclear(&sched.sysmonnote)
4688 }
4689 idle = 0
4690 delay = 20
4691 }
4692 unlock(&sched.lock)
4693 }
4694 lock(&sched.sysmonlock)
4695 {
4696
4697
4698
4699 now1 := nanotime()
4700 if now1-now > 50*1000 {
4701 next, _ = timeSleepUntil()
4702 }
4703 now = now1
4704 }
4705
4706
4707 if *cgo_yield != nil {
4708 asmcgocall(*cgo_yield, nil)
4709 }
4710
4711 lastpoll := int64(atomic.Load64(&sched.lastpoll))
4712 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
4713 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
4714 list := netpoll(0)
4715 if !list.empty() {
4716
4717
4718
4719
4720
4721
4722
4723 incidlelocked(-1)
4724 injectglist(&list)
4725 incidlelocked(1)
4726 }
4727 }
4728 if next < now {
4729
4730
4731
4732 startm(nil, false)
4733 }
4734 if atomic.Load(&scavenge.sysmonWake) != 0 {
4735
4736 wakeScavenger()
4737 }
4738
4739
4740 if retake(now) != 0 {
4741 idle = 0
4742 } else {
4743 idle++
4744 }
4745
4746 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
4747 lock(&forcegc.lock)
4748 forcegc.idle = 0
4749 var list gList
4750 list.push(forcegc.g)
4751 injectglist(&list)
4752 unlock(&forcegc.lock)
4753 }
4754 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
4755 lasttrace = now
4756 schedtrace(debug.scheddetail > 0)
4757 }
4758 unlock(&sched.sysmonlock)
4759 }
4760 }
4761
4762 type sysmontick struct {
4763 schedtick uint32
4764 schedwhen int64
4765 syscalltick uint32
4766 syscallwhen int64
4767 }
4768
4769 // forcePreemptNS is the time slice given to a G before retake below
4770 // asks for it to be preempted.
4771 const forcePreemptNS = 10 * 1000 * 1000
4772 // retake preempts Gs that have exceeded their time slice and retakes Ps blocked in system calls; it returns the number of Ps handed off.
4773 func retake(now int64) uint32 {
4774 n := 0
4775
4776
4777 lock(&allpLock)
4778
4779
4780
4781 for i := 0; i < len(allp); i++ {
4782 _p_ := allp[i]
4783 if _p_ == nil {
4784
4785
4786 continue
4787 }
4788 pd := &_p_.sysmontick
4789 s := _p_.status
4790 sysretake := false
4791 if s == _Prunning || s == _Psyscall {
4792
4793 t := int64(_p_.schedtick)
4794 if int64(pd.schedtick) != t {
4795 pd.schedtick = uint32(t)
4796 pd.schedwhen = now
4797 } else if pd.schedwhen+forcePreemptNS <= now {
4798 preemptone(_p_)
4799
4800
4801 sysretake = true
4802 }
4803 }
4804 if s == _Psyscall {
4805
4806 t := int64(_p_.syscalltick)
4807 if !sysretake && int64(pd.syscalltick) != t {
4808 pd.syscalltick = uint32(t)
4809 pd.syscallwhen = now
4810 continue
4811 }
4812
4813
4814
4815 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4816 continue
4817 }
4818
4819 unlock(&allpLock)
4820
4821
4822
4823
4824 incidlelocked(-1)
4825 if atomic.Cas(&_p_.status, s, _Pidle) {
4826 if trace.enabled {
4827 traceGoSysBlock(_p_)
4828 traceProcStop(_p_)
4829 }
4830 n++
4831 _p_.syscalltick++
4832 handoffp(_p_)
4833 }
4834 incidlelocked(1)
4835 lock(&allpLock)
4836 }
4837 }
4838 unlock(&allpLock)
4839 return uint32(n)
4840 }
4841
4842
4843 // preemptall asks every running goroutine to stop. It is best effort:
4844 // it can fail to inform a goroutine if a processor has just started
4845 // running it. No locks are needed. It returns true if a preemption
4846 // request was issued to at least one goroutine.
4847 func preemptall() bool {
4848 res := false
4849 for _, _p_ := range allp {
4850 if _p_.status != _Prunning {
4851 continue
4852 }
4853 if preemptone(_p_) {
4854 res = true
4855 }
4856 }
4857 return res
4858 }
4859
4860
4861
4862
4863
4864
4865 // preemptone asks the goroutine running on _p_ to stop. It is purely
4866 // best effort: it can fail to inform the goroutine, or inform the wrong
4867 // one. It returns true if a request was issued; the actual preemption
4868 // happens later, when the target next checks its stack guard or
4869 // receives the async preemption signal.
4870 func preemptone(_p_ *p) bool {
4871 mp := _p_.m.ptr()
4872 if mp == nil || mp == getg().m {
4873 return false
4874 }
4875 gp := mp.curg
4876 if gp == nil || gp == mp.g0 {
4877 return false
4878 }
4879
4880 gp.preempt = true
4881
4882
4883
4884
4885
4886 gp.stackguard0 = stackPreempt
4887
4888
4889 if preemptMSupported && debug.asyncpreemptoff == 0 {
4890 _p_.preempt = true
4891 preemptM(mp)
4892 }
4893
4894 return true
4895 }
4896
4897 var starttime int64
4898
4899 func schedtrace(detailed bool) {
4900 now := nanotime()
4901 if starttime == 0 {
4902 starttime = now
4903 }
4904
4905 lock(&sched.lock)
4906 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4907 if detailed {
4908 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4909 }
4910
4911
4912
4913 for i, _p_ := range allp {
4914 mp := _p_.m.ptr()
4915 h := atomic.Load(&_p_.runqhead)
4916 t := atomic.Load(&_p_.runqtail)
4917 if detailed {
4918 id := int64(-1)
4919 if mp != nil {
4920 id = mp.id
4921 }
4922 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
4923 } else {
4924
4925
4926 print(" ")
4927 if i == 0 {
4928 print("[")
4929 }
4930 print(t - h)
4931 if i == len(allp)-1 {
4932 print("]\n")
4933 }
4934 }
4935 }
4936
4937 if !detailed {
4938 unlock(&sched.lock)
4939 return
4940 }
4941
4942 for mp := allm; mp != nil; mp = mp.alllink {
4943 _p_ := mp.p.ptr()
4944 gp := mp.curg
4945 lockedg := mp.lockedg.ptr()
4946 id1 := int32(-1)
4947 if _p_ != nil {
4948 id1 = _p_.id
4949 }
4950 id2 := int64(-1)
4951 if gp != nil {
4952 id2 = gp.goid
4953 }
4954 id3 := int64(-1)
4955 if lockedg != nil {
4956 id3 = lockedg.goid
4957 }
4958 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4959 }
4960
4961 lock(&allglock)
4962 for gi := 0; gi < len(allgs); gi++ {
4963 gp := allgs[gi]
4964 mp := gp.m
4965 lockedm := gp.lockedm.ptr()
4966 id1 := int64(-1)
4967 if mp != nil {
4968 id1 = mp.id
4969 }
4970 id2 := int64(-1)
4971 if lockedm != nil {
4972 id2 = lockedm.id
4973 }
4974 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
4975 }
4976 unlock(&allglock)
4977 unlock(&sched.lock)
4978 }
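// Editor's note: schedtrace output is enabled from the environment, e.g.
//
//	GODEBUG=schedtrace=1000 ./prog               // one summary line per second
//	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // plus per-P, per-M, per-G lines
//
// A (made-up) summary line looks like:
//
//	SCHED 2004ms: gomaxprocs=4 idleprocs=3 threads=7 spinningthreads=0 idlethreads=3 runqueue=0 [1 0 0 0]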
4979
4980
4981
4982 // schedEnableUser enables or disables scheduling of user goroutines.
4983 // It does not stop user goroutines that are already running, so the
4984 // caller should stop the world first when disabling them.
4985 func schedEnableUser(enable bool) {
4986 lock(&sched.lock)
4987 if sched.disable.user == !enable {
4988 unlock(&sched.lock)
4989 return
4990 }
4991 sched.disable.user = !enable
4992 if enable {
4993 n := sched.disable.n
4994 sched.disable.n = 0
4995 globrunqputbatch(&sched.disable.runnable, n)
4996 unlock(&sched.lock)
4997 for ; n != 0 && sched.npidle != 0; n-- {
4998 startm(nil, false)
4999 }
5000 } else {
5001 unlock(&sched.lock)
5002 }
5003 }
5004
5005
5006 // schedEnabled reports whether gp may be scheduled right now.
5007 func schedEnabled(gp *g) bool {
5008 if sched.disable.user {
5009 return isSystemGoroutine(gp, true)
5010 }
5011 return true
5012 }
5013
5014
5015 // mput puts mp on the idle M list.
5016 // sched.lock must be held; this may run during STW, so write barriers
5017 // are not allowed.
5018 func mput(mp *m) {
5019 mp.schedlink = sched.midle
5020 sched.midle.set(mp)
5021 sched.nmidle++
5022 checkdead()
5023 }
5024
5025
5026 // mget tries to take an M off the idle list; it returns nil if empty.
5027 // sched.lock must be held; this may run during STW, so write barriers
5028 // are not allowed.
5029 func mget() *m {
5030 mp := sched.midle.ptr()
5031 if mp != nil {
5032 sched.midle = mp.schedlink
5033 sched.nmidle--
5034 }
5035 return mp
5036 }
5037
5038
5039 // globrunqput puts gp at the tail of the global runnable queue.
5040 // sched.lock must be held; this may run during STW, so write barriers
5041 // are not allowed.
5042 func globrunqput(gp *g) {
5043 sched.runq.pushBack(gp)
5044 sched.runqsize++
5045 }
5046
5047
5048
5049 // globrunqputhead puts gp at the head of the global runnable queue.
5050 // sched.lock must be held.
5051 func globrunqputhead(gp *g) {
5052 sched.runq.push(gp)
5053 sched.runqsize++
5054 }
5055
5056
5057 // globrunqputbatch appends a whole batch to the global runnable queue
5058 // and clears *batch. sched.lock must be held.
5059 func globrunqputbatch(batch *gQueue, n int32) {
5060 sched.runq.pushBackAll(*batch)
5061 sched.runqsize += n
5062 *batch = gQueue{}
5063 }
5064
5065
5066 // globrunqget moves up to max G's from the global queue to _p_'s local queue and returns one of them. sched.lock must be held.
5067 func globrunqget(_p_ *p, max int32) *g {
5068 if sched.runqsize == 0 {
5069 return nil
5070 }
5071
5072 n := sched.runqsize/gomaxprocs + 1
5073 if n > sched.runqsize {
5074 n = sched.runqsize
5075 }
5076 if max > 0 && n > max {
5077 n = max
5078 }
5079 if n > int32(len(_p_.runq))/2 {
5080 n = int32(len(_p_.runq)) / 2
5081 }
5082
5083 sched.runqsize -= n
5084
5085 gp := sched.runq.pop()
5086 n--
5087 for ; n > 0; n-- {
5088 gp1 := sched.runq.pop()
5089 runqput(_p_, gp1, false)
5090 }
5091 return gp
5092 }
5093
5094
5095 // pidleput puts _p_ on the idle P list. _p_'s run queue must be empty.
5096 // sched.lock must be held; this may run during STW, so write barriers
5097 // are not allowed.
5098 func pidleput(_p_ *p) {
5099 if !runqempty(_p_) {
5100 throw("pidleput: P has non-empty run queue")
5101 }
5102 _p_.link = sched.pidle
5103 sched.pidle.set(_p_)
5104 atomic.Xadd(&sched.npidle, 1)
5105 }
5106
5107
5108 // pidleget tries to take a P off the idle list; it returns nil if empty.
5109 // sched.lock must be held; this may run during STW, so write barriers
5110 // are not allowed.
5111 func pidleget() *p {
5112 _p_ := sched.pidle.ptr()
5113 if _p_ != nil {
5114 sched.pidle = _p_.link
5115 atomic.Xadd(&sched.npidle, -1)
5116 }
5117 return _p_
5118 }
5119
5120 // runqempty reports whether _p_ has no Gs on its local run queue.
5121 // It never returns true spuriously.
5122 func runqempty(_p_ *p) bool {
5123 // Defend against the race where a G sitting in runnext is kicked out
5124 // onto the run queue between our loads: observing runqhead == runqtail
5125 // and then runnext == 0 is not enough on its own, so runqtail is
5126 // re-checked to make sure the three loads describe a consistent snapshot.
5127 for {
5128 head := atomic.Load(&_p_.runqhead)
5129 tail := atomic.Load(&_p_.runqtail)
5130 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5131 if tail == atomic.Load(&_p_.runqtail) {
5132 return head == tail && runnext == 0
5133 }
5134 }
5135 }
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146 const randomizeScheduler = raceenabled
5147
5148 // runqput tries to put g on the local runnable queue.
5149 // If next is false, it adds g to the tail of the queue.
5150 // If next is true, it puts g in the _p_.runnext slot.
5151 // If the local queue is full, g is pushed to the global queue.
5152 // Executed only by the owner P.
5153 func runqput(_p_ *p, gp *g, next bool) {
5154 if randomizeScheduler && next && fastrand()%2 == 0 {
5155 next = false
5156 }
5157
5158 if next {
5159 retryNext:
5160 oldnext := _p_.runnext
5161 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5162 goto retryNext
5163 }
5164 if oldnext == 0 {
5165 return
5166 }
5167
5168 gp = oldnext.ptr()
5169 }
5170
5171 retry:
5172 h := atomic.LoadAcq(&_p_.runqhead)
5173 t := _p_.runqtail
5174 if t-h < uint32(len(_p_.runq)) {
5175 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5176 atomic.StoreRel(&_p_.runqtail, t+1)
5177 return
5178 }
5179 if runqputslow(_p_, gp, h, t) {
5180 return
5181 }
5182
5183 goto retry
5184 }
5185
5186 // runqputslow moves g and half of the local run queue to the global
5187 // queue. Executed only by the owner P.
5188 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5189 var batch [len(_p_.runq)/2 + 1]*g
5190
5191
5192 n := t - h
5193 n = n / 2
5194 if n != uint32(len(_p_.runq)/2) {
5195 throw("runqputslow: queue is not full")
5196 }
5197 for i := uint32(0); i < n; i++ {
5198 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5199 }
5200 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
5201 return false
5202 }
5203 batch[n] = gp
5204
5205 if randomizeScheduler {
5206 for i := uint32(1); i <= n; i++ {
5207 j := fastrandn(i + 1)
5208 batch[i], batch[j] = batch[j], batch[i]
5209 }
5210 }
5211
5212
5213 for i := uint32(0); i < n; i++ {
5214 batch[i].schedlink.set(batch[i+1])
5215 }
5216 var q gQueue
5217 q.head.set(batch[0])
5218 q.tail.set(batch[n])
5219
5220
5221 lock(&sched.lock)
5222 globrunqputbatch(&q, int32(n+1))
5223 unlock(&sched.lock)
5224 return true
5225 }
5226
5227
5228 // runqputbatch tries to put all the G's on q onto the local runnable
5229 // queue; whatever does not fit goes to the global queue (briefly taking
5230 // sched.lock). Executed only by the owner P.
5231 func runqputbatch(pp *p, q *gQueue, qsize int) {
5232 h := atomic.LoadAcq(&pp.runqhead)
5233 t := pp.runqtail
5234 n := uint32(0)
5235 for !q.empty() && t-h < uint32(len(pp.runq)) {
5236 gp := q.pop()
5237 pp.runq[t%uint32(len(pp.runq))].set(gp)
5238 t++
5239 n++
5240 }
5241 qsize -= int(n)
5242
5243 if randomizeScheduler {
5244 off := func(o uint32) uint32 {
5245 return (pp.runqtail + o) % uint32(len(pp.runq))
5246 }
5247 for i := uint32(1); i < n; i++ {
5248 j := fastrandn(i + 1)
5249 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
5250 }
5251 }
5252
5253 atomic.StoreRel(&pp.runqtail, t)
5254 if !q.empty() {
5255 lock(&sched.lock)
5256 globrunqputbatch(q, int32(qsize))
5257 unlock(&sched.lock)
5258 }
5259 }
5260
5261
5262 // runqget takes a g from the local runnable queue, preferring runnext.
5263 // If inheritTime is true, gp should inherit the remaining time in the
5264 // current time slice. Executed only by the owner P.
5265 func runqget(_p_ *p) (gp *g, inheritTime bool) {
5266
5267 for {
5268 next := _p_.runnext
5269 if next == 0 {
5270 break
5271 }
5272 if _p_.runnext.cas(next, 0) {
5273 return next.ptr(), true
5274 }
5275 }
5276
5277 for {
5278 h := atomic.LoadAcq(&_p_.runqhead)
5279 t := _p_.runqtail
5280 if t == h {
5281 return nil, false
5282 }
5283 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
5284 if atomic.CasRel(&_p_.runqhead, h, h+1) {
5285 return gp, false
5286 }
5287 }
5288 }
5289
5290
5291 // runqgrab grabs a batch of goroutines (about half) from _p_'s runnable
5292 // queue into batch, a ring buffer starting at batchHead. It returns the
5293 // number grabbed and can be executed by any P.
5294 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
5295 for {
5296 h := atomic.LoadAcq(&_p_.runqhead)
5297 t := atomic.LoadAcq(&_p_.runqtail)
5298 n := t - h
5299 n = n - n/2
5300 if n == 0 {
5301 if stealRunNextG {
5302
5303 if next := _p_.runnext; next != 0 {
5304 if _p_.status == _Prunning {
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315 if GOOS != "windows" {
5316 usleep(3)
5317 } else {
5318
5319
5320
5321 osyield()
5322 }
5323 }
5324 if !_p_.runnext.cas(next, 0) {
5325 continue
5326 }
5327 batch[batchHead%uint32(len(batch))] = next
5328 return 1
5329 }
5330 }
5331 return 0
5332 }
5333 if n > uint32(len(_p_.runq)/2) {
5334 continue
5335 }
5336 for i := uint32(0); i < n; i++ {
5337 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
5338 batch[(batchHead+i)%uint32(len(batch))] = g
5339 }
5340 if atomic.CasRel(&_p_.runqhead, h, h+n) {
5341 return n
5342 }
5343 }
5344 }
5345
5346
5347 // runqsteal steals half of p2's local run queue into _p_'s and returns
5348 // one of the stolen elements, or nil if the steal failed.
5349 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
5350 t := _p_.runqtail
5351 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
5352 if n == 0 {
5353 return nil
5354 }
5355 n--
5356 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
5357 if n == 0 {
5358 return gp
5359 }
5360 h := atomic.LoadAcq(&_p_.runqhead)
5361 if t-h+n >= uint32(len(_p_.runq)) {
5362 throw("runqsteal: runq overflow")
5363 }
5364 atomic.StoreRel(&_p_.runqtail, t+n)
5365 return gp
5366 }
5367
5368 // A gQueue is a queue of Gs linked through g.schedlink. A G can be on
5369 // only one gQueue or gList at a time.
5370 type gQueue struct {
5371 head guintptr
5372 tail guintptr
5373 }
5374
5375
5376 func (q *gQueue) empty() bool {
5377 return q.head == 0
5378 }
5379
5380
5381 func (q *gQueue) push(gp *g) {
5382 gp.schedlink = q.head
5383 q.head.set(gp)
5384 if q.tail == 0 {
5385 q.tail.set(gp)
5386 }
5387 }
5388
5389
5390 func (q *gQueue) pushBack(gp *g) {
5391 gp.schedlink = 0
5392 if q.tail != 0 {
5393 q.tail.ptr().schedlink.set(gp)
5394 } else {
5395 q.head.set(gp)
5396 }
5397 q.tail.set(gp)
5398 }
5399
5400
5401
5402 func (q *gQueue) pushBackAll(q2 gQueue) {
5403 if q2.tail == 0 {
5404 return
5405 }
5406 q2.tail.ptr().schedlink = 0
5407 if q.tail != 0 {
5408 q.tail.ptr().schedlink = q2.head
5409 } else {
5410 q.head = q2.head
5411 }
5412 q.tail = q2.tail
5413 }
5414
5415
5416
5417 func (q *gQueue) pop() *g {
5418 gp := q.head.ptr()
5419 if gp != nil {
5420 q.head = gp.schedlink
5421 if q.head == 0 {
5422 q.tail = 0
5423 }
5424 }
5425 return gp
5426 }
5427
5428
5429 func (q *gQueue) popList() gList {
5430 stack := gList{q.head}
5431 *q = gQueue{}
5432 return stack
5433 }
5434
5435 // A gList is a LIFO list of Gs linked through g.schedlink. A G can be
5436 // on only one gQueue or gList at a time.
5437 type gList struct {
5438 head guintptr
5439 }
5440
5441
5442 func (l *gList) empty() bool {
5443 return l.head == 0
5444 }
5445
5446
5447 func (l *gList) push(gp *g) {
5448 gp.schedlink = l.head
5449 l.head.set(gp)
5450 }
5451
5452
5453 func (l *gList) pushAll(q gQueue) {
5454 if !q.empty() {
5455 q.tail.ptr().schedlink = l.head
5456 l.head = q.head
5457 }
5458 }
5459
5460
5461 func (l *gList) pop() *g {
5462 gp := l.head.ptr()
5463 if gp != nil {
5464 l.head = gp.schedlink
5465 }
5466 return gp
5467 }
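// Editor's sketch: gQueue and gList are intrusive containers; they allocate
// nothing and link Gs through g.schedlink, which is why a G may sit on at
// most one of them at a time. A typical handoff looks like:
//
//	var q gQueue
//	q.pushBack(gp1)
//	q.pushBack(gp2)
//	var l gList
//	l.pushAll(q) // the Gs now belong to l; q must not be reused as-is
//	for gp := l.pop(); gp != nil; gp = l.pop() {
//		// make gp runnable, etc.
//	}
//
// gp1 and gp2 stand for existing *g values; this is conceptual, not a test.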
5468
5469 // setMaxThreads implements runtime/debug.SetMaxThreads; it returns the previous limit.
5470 func setMaxThreads(in int) (out int) {
5471 lock(&sched.lock)
5472 out = int(sched.maxmcount)
5473 if in > 0x7fffffff {
5474 sched.maxmcount = 0x7fffffff
5475 } else {
5476 sched.maxmcount = int32(in)
5477 }
5478 checkmcount()
5479 unlock(&sched.lock)
5480 return
5481 }
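// Editor's example: setMaxThreads backs runtime/debug.SetMaxThreads:
//
//	package main
//
//	import "runtime/debug"
//
//	func main() {
//		old := debug.SetMaxThreads(20000) // raise the OS-thread limit (default 10000)
//		_ = old                           // previous limit is returned
//	}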
5482 // haveexperiment reports whether the named GOEXPERIMENT was enabled when the toolchain was built.
5483 func haveexperiment(name string) bool {
5484 if name == "framepointer" {
5485 return framepointer_enabled
5486 }
5487 x := sys.Goexperiment
5488 for x != "" {
5489 xname := ""
5490 i := index(x, ",")
5491 if i < 0 {
5492 xname, x = x, ""
5493 } else {
5494 xname, x = x[:i], x[i+1:]
5495 }
5496 if xname == name {
5497 return true
5498 }
5499 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
5500 return false
5501 }
5502 }
5503 return false
5504 }
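// Editor's note: the value consulted here is the comma-separated
// GOEXPERIMENT string the toolchain was built with; "foo" enables an
// experiment and "nofoo" disables it, so a check reads e.g.
//
//	if haveexperiment("framepointer") { ... }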
5505
5506
5507 func procPin() int {
5508 _g_ := getg()
5509 mp := _g_.m
5510
5511 mp.locks++
5512 return int(mp.p.ptr().id)
5513 }
5514
5515
5516 func procUnpin() {
5517 _g_ := getg()
5518 _g_.m.locks--
5519 }
5520
5521
5522
5523 func sync_runtime_procPin() int {
5524 return procPin()
5525 }
5526
5527
5528
5529 func sync_runtime_procUnpin() {
5530 procUnpin()
5531 }
5532
5533
5534
5535 func sync_atomic_runtime_procPin() int {
5536 return procPin()
5537 }
5538
5539
5540
5541 func sync_atomic_runtime_procUnpin() {
5542 procUnpin()
5543 }
5544
5545 // sync_runtime_canSpin reports whether active spinning is worthwhile
5546 // for sync.Mutex: only for a few iterations, on multicore machines,
5547 // with spare Ps and an empty local run queue.
5548 func sync_runtime_canSpin(i int) bool {
5549
5550
5551
5552
5553
5554 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
5555 return false
5556 }
5557 if p := getg().m.p.ptr(); !runqempty(p) {
5558 return false
5559 }
5560 return true
5561 }
5562
5563 // sync_runtime_doSpin performs a short burst of active spinning
5564 // (active_spin_cnt iterations of procyield) on behalf of sync.Mutex.
5565 func sync_runtime_doSpin() {
5566 procyield(active_spin_cnt)
5567 }
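// Editor's sketch: sync.Mutex's lock slow path reaches these two hooks via
// go:linkname, in a loop shaped roughly like:
//
//	iter := 0
//	for sync_runtime_canSpin(iter) {
//		sync_runtime_doSpin() // a short burst of PAUSE-style spinning
//		iter++
//		// ... re-try the CAS on the mutex state ...
//	}
//
// The loop shape is illustrative; see sync/mutex.go for the real code.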
5568
5569 var stealOrder randomOrder
5570
5571
5572 // randomOrder/randomEnum are helpers for randomized work stealing.
5573 // They enumerate all Ps in pseudo-random orders without repetition,
5574 // by stepping through indices with a stride coprime to the P count.
5575 type randomOrder struct {
5576 count uint32
5577 coprimes []uint32
5578 }
5579
5580 type randomEnum struct {
5581 i uint32
5582 count uint32
5583 pos uint32
5584 inc uint32
5585 }
5586
5587 func (ord *randomOrder) reset(count uint32) {
5588 ord.count = count
5589 ord.coprimes = ord.coprimes[:0]
5590 for i := uint32(1); i <= count; i++ {
5591 if gcd(i, count) == 1 {
5592 ord.coprimes = append(ord.coprimes, i)
5593 }
5594 }
5595 }
5596
5597 func (ord *randomOrder) start(i uint32) randomEnum {
5598 return randomEnum{
5599 count: ord.count,
5600 pos: i % ord.count,
5601 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
5602 }
5603 }
5604
5605 func (enum *randomEnum) done() bool {
5606 return enum.i == enum.count
5607 }
5608
5609 func (enum *randomEnum) next() {
5610 enum.i++
5611 enum.pos = (enum.pos + enum.inc) % enum.count
5612 }
5613
5614 func (enum *randomEnum) position() uint32 {
5615 return enum.pos
5616 }
5617
5618 func gcd(a, b uint32) uint32 {
5619 for b != 0 {
5620 a, b = b, a%b
5621 }
5622 return a
5623 }
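// Editor's example (standalone, not runtime code): stepping by an increment
// that is coprime with count visits every index exactly once, which is the
// property randomOrder relies on when choosing victims for work stealing:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		count, inc, pos := uint32(5), uint32(3), uint32(2) // gcd(3, 5) == 1
//		for i := uint32(0); i < count; i++ {
//			fmt.Print(pos, " ") // 2 0 3 1 4: each index appears once
//			pos = (pos + inc) % count
//		}
//		fmt.Println()
//	}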
5624
5625 // An initTask represents the set of initializations that need to be
5626 // done for a package: its state, its dependencies, and its init functions.
5627 type initTask struct {
5628 // state: 0 = uninitialized, 1 = in progress, 2 = done
5629 state uintptr
5630 ndeps uintptr
5631 nfns uintptr
5632 // The ndeps dependency task pointers and then the nfns init-function
5633 // PCs follow this header in memory; doInit indexes past it with unsafe.
5634 }
5635 // doInit runs t's dependencies' init tasks, then t's own init functions, exactly once.
5636 func doInit(t *initTask) {
5637 switch t.state {
5638 case 2:
5639 return
5640 case 1:
5641 throw("recursive call during initialization - linker skew")
5642 default:
5643 t.state = 1
5644 for i := uintptr(0); i < t.ndeps; i++ {
5645 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
5646 t2 := *(**initTask)(p)
5647 doInit(t2)
5648 }
5649 for i := uintptr(0); i < t.nfns; i++ {
5650 p := add(unsafe.Pointer(t), (3+t.ndeps+i)*sys.PtrSize)
5651 f := *(*func())(unsafe.Pointer(&p))
5652 f()
5653 }
5654 t.state = 2
5655 }
5656 }
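// Editor's example: doInit runs a package's dependencies before the package
// itself, and within one package the init functions run in source order,
// after package-level variable initializers:
//
//	package main
//
//	import "fmt"
//
//	var x = initX() // variable initializers run before init functions
//
//	func initX() int { fmt.Println("var init"); return 1 }
//	func init()      { fmt.Println("init 1") }
//	func init()      { fmt.Println("init 2") }
//	func main()      { fmt.Println("main") }
//
// which prints "var init", "init 1", "init 2", "main", in that order.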
5657