Source file src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/cpu"
10 "runtime/internal/atomic"
11 "runtime/internal/sys"
12 "unsafe"
13 )
14
15 var buildVersion = sys.TheVersion
16
17
18 var modinfo string
19
83 var (
84 m0 m
85 g0 g
86 mcache0 *mcache
87 raceprocctx0 uintptr
88 )
89
90
91 var runtime_inittask initTask
92
93
94 var main_inittask initTask
95
96
97
98
99
100 var main_init_done chan bool
101
102
103 func main_main()
104
105
106 var mainStarted bool
107
108
109 var runtimeInitTime int64
110
111
112 var initSigmask sigset
113
114
115 func main() {
116 g := getg()
117
118
119
120 g.m.g0.racectx = 0
121
122
123
124
125 if sys.PtrSize == 8 {
126 maxstacksize = 1000000000
127 } else {
128 maxstacksize = 250000000
129 }
130
131
132
133
134 maxstackceiling = 2 * maxstacksize
135
136
137 mainStarted = true
138
139 if GOARCH != "wasm" {
140
141
142
143 atomic.Store(&sched.sysmonStarting, 1)
144 systemstack(func() {
145 newm(sysmon, nil, -1)
146 })
147 }
148
149
150
151
152
153
154
155 lockOSThread()
156
157 if g.m != &m0 {
158 throw("runtime.main not on m0")
159 }
160 m0.doesPark = true
161
162
163
164 runtimeInitTime = nanotime()
165 if runtimeInitTime == 0 {
166 throw("nanotime returning zero")
167 }
168
169 if debug.inittrace != 0 {
170 inittrace.id = getg().goid
171 inittrace.active = true
172 }
173
174 doInit(&runtime_inittask)
175
176
177 needUnlock := true
178 defer func() {
179 if needUnlock {
180 unlockOSThread()
181 }
182 }()
183
184 gcenable()
185
186 main_init_done = make(chan bool)
187 if iscgo {
188 if _cgo_thread_start == nil {
189 throw("_cgo_thread_start missing")
190 }
191 if GOOS != "windows" {
192 if _cgo_setenv == nil {
193 throw("_cgo_setenv missing")
194 }
195 if _cgo_unsetenv == nil {
196 throw("_cgo_unsetenv missing")
197 }
198 }
199 if _cgo_notify_runtime_init_done == nil {
200 throw("_cgo_notify_runtime_init_done missing")
201 }
202
203
204 startTemplateThread()
205 cgocall(_cgo_notify_runtime_init_done, nil)
206 }
207
208 doInit(&main_inittask)
209
210
211
212 inittrace.active = false
213
214 close(main_init_done)
215
216 needUnlock = false
217 unlockOSThread()
218
219 if isarchive || islibrary {
220
221
222 return
223 }
224 fn := main_main
225 fn()
226 if raceenabled {
227 racefini()
228 }
229
230
231
232
233
234 if atomic.Load(&runningPanicDefers) != 0 {
235
236 for c := 0; c < 1000; c++ {
237 if atomic.Load(&runningPanicDefers) == 0 {
238 break
239 }
240 Gosched()
241 }
242 }
243 if atomic.Load(&panicking) != 0 {
244 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
245 }
246
247 exit(0)
248 for {
249 var x *int32
250 *x = 0
251 }
252 }
253
254
255
256 func os_beforeExit() {
257 if raceenabled {
258 racefini()
259 }
260 }
261
262
263 func init() {
264 go forcegchelper()
265 }
266
267 func forcegchelper() {
268 forcegc.g = getg()
269 lockInit(&forcegc.lock, lockRankForcegc)
270 for {
271 lock(&forcegc.lock)
272 if forcegc.idle != 0 {
273 throw("forcegc: phase error")
274 }
275 atomic.Store(&forcegc.idle, 1)
276 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
277
278 if debug.gctrace > 0 {
279 println("GC forced")
280 }
281
282 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
283 }
284 }
285
286
287
288
289
290 func Gosched() {
291 checkTimeouts()
292 mcall(gosched_m)
293 }
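Gosched here is the exported runtime.Gosched: it yields the processor so other goroutines can run, and the calling goroutine is put back on a run queue and resumed automatically later. A minimal user-level example of calling it (ordinary application code, not part of this file):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		fmt.Println("other goroutine ran")
		close(done)
	}()
	// Yield so the goroutine above gets a turn even with GOMAXPROCS=1; the
	// current goroutine goes back on a run queue and resumes automatically.
	runtime.Gosched()
	<-done
}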
294
295
296
297
298 func goschedguarded() {
299 mcall(goschedguarded_m)
300 }
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
320 if reason != waitReasonSleep {
321 checkTimeouts()
322 }
323 mp := acquirem()
324 gp := mp.curg
325 status := readgstatus(gp)
326 if status != _Grunning && status != _Gscanrunning {
327 throw("gopark: bad g status")
328 }
329 mp.waitlock = lock
330 mp.waitunlockf = unlockf
331 gp.waitreason = reason
332 mp.waittraceev = traceEv
333 mp.waittraceskip = traceskip
334 releasem(mp)
335
336 mcall(park_m)
337 }
338
339
340
341 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
342 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
343 }
344
345 func goready(gp *g, traceskip int) {
346 systemstack(func() {
347 ready(gp, traceskip, true)
348 })
349 }
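gopark records why the goroutine is blocking and then parks it via park_m; goparkunlock additionally releases the given runtime mutex only once the goroutine is committed to parking, and goready is the matching wake-up. A rough user-level analogue of that unlock-while-parking discipline, sketched with sync.Cond (illustrative only; the runtime does not use sync.Cond internally):

package main

import "sync"

// waiter models the park/ready handshake: the sleeper re-checks its condition
// under the lock, and the lock is only given up as part of going to sleep, so
// a wakeup racing with the decision to sleep cannot be lost.
type waiter struct {
	mu    sync.Mutex
	cond  *sync.Cond
	ready bool
}

func newWaiter() *waiter {
	w := &waiter{}
	w.cond = sync.NewCond(&w.mu)
	return w
}

// park blocks until wake has been called, analogous to goparkunlock.
func (w *waiter) park() {
	w.mu.Lock()
	for !w.ready {
		w.cond.Wait() // atomically unlocks w.mu and suspends
	}
	w.ready = false
	w.mu.Unlock()
}

// wake marks the waiter runnable and signals it, analogous to goready.
func (w *waiter) wake() {
	w.mu.Lock()
	w.ready = true
	w.mu.Unlock()
	w.cond.Signal()
}

func main() {
	w := newWaiter()
	done := make(chan struct{})
	go func() {
		w.park()
		close(done)
	}()
	w.wake()
	<-done
}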
350
351
352 func acquireSudog() *sudog {
353
354
355
356
357
358
359
360
361 mp := acquirem()
362 pp := mp.p.ptr()
363 if len(pp.sudogcache) == 0 {
364 lock(&sched.sudoglock)
365
366 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
367 s := sched.sudogcache
368 sched.sudogcache = s.next
369 s.next = nil
370 pp.sudogcache = append(pp.sudogcache, s)
371 }
372 unlock(&sched.sudoglock)
373
374 if len(pp.sudogcache) == 0 {
375 pp.sudogcache = append(pp.sudogcache, new(sudog))
376 }
377 }
378 n := len(pp.sudogcache)
379 s := pp.sudogcache[n-1]
380 pp.sudogcache[n-1] = nil
381 pp.sudogcache = pp.sudogcache[:n-1]
382 if s.elem != nil {
383 throw("acquireSudog: found s.elem != nil in cache")
384 }
385 releasem(mp)
386 return s
387 }
388
389
390 func releaseSudog(s *sudog) {
391 if s.elem != nil {
392 throw("runtime: sudog with non-nil elem")
393 }
394 if s.isSelect {
395 throw("runtime: sudog with non-false isSelect")
396 }
397 if s.next != nil {
398 throw("runtime: sudog with non-nil next")
399 }
400 if s.prev != nil {
401 throw("runtime: sudog with non-nil prev")
402 }
403 if s.waitlink != nil {
404 throw("runtime: sudog with non-nil waitlink")
405 }
406 if s.c != nil {
407 throw("runtime: sudog with non-nil c")
408 }
409 gp := getg()
410 if gp.param != nil {
411 throw("runtime: releaseSudog with non-nil gp.param")
412 }
413 mp := acquirem()
414 pp := mp.p.ptr()
415 if len(pp.sudogcache) == cap(pp.sudogcache) {
416
417 var first, last *sudog
418 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
419 n := len(pp.sudogcache)
420 p := pp.sudogcache[n-1]
421 pp.sudogcache[n-1] = nil
422 pp.sudogcache = pp.sudogcache[:n-1]
423 if first == nil {
424 first = p
425 } else {
426 last.next = p
427 }
428 last = p
429 }
430 lock(&sched.sudoglock)
431 last.next = sched.sudogcache
432 sched.sudogcache = first
433 unlock(&sched.sudoglock)
434 }
435 pp.sudogcache = append(pp.sudogcache, s)
436 releasem(mp)
437 }
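acquireSudog and releaseSudog maintain a two-level free list: a small per-P slice (p.sudogcache) for lock-free reuse, with half of it transferred to or from a central, sched.sudoglock-protected linked list whenever the local cache runs empty or full. A simplified standalone sketch of that caching strategy (hypothetical names; the real code also pins the M with acquirem and checks sudog invariants):

package main

import "sync"

type node struct{ next *node }

// central is the shared pool, protected by a mutex (standing in for
// sched.sudogcache guarded by sched.sudoglock).
var central struct {
	sync.Mutex
	head *node
}

// localCap plays the role of the fixed capacity of p.sudogcache.
const localCap = 16

// acquire pops from the local slice, refilling it to half capacity from the
// central list when it is empty, and allocating only as a last resort.
func acquire(local *[]*node) *node {
	if len(*local) == 0 {
		central.Lock()
		for len(*local) < localCap/2 && central.head != nil {
			n := central.head
			central.head = n.next
			n.next = nil
			*local = append(*local, n)
		}
		central.Unlock()
		if len(*local) == 0 {
			*local = append(*local, new(node))
		}
	}
	n := (*local)[len(*local)-1]
	(*local)[len(*local)-1] = nil
	*local = (*local)[:len(*local)-1]
	return n
}

// release pushes onto the local slice, first spilling half of it to the
// central list if the local cache is full.
func release(local *[]*node, n *node) {
	if len(*local) == localCap {
		var first, last *node
		for len(*local) > localCap/2 {
			p := (*local)[len(*local)-1]
			(*local)[len(*local)-1] = nil
			*local = (*local)[:len(*local)-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		central.Lock()
		last.next = central.head
		central.head = first
		central.Unlock()
	}
	*local = append(*local, n)
}

func main() {
	local := make([]*node, 0, localCap)
	n := acquire(&local)
	release(&local, n)
}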
438
439
440
441
442
443
444
445
446
447 func funcPC(f interface{}) uintptr {
448 return *(*uintptr)(efaceOf(&f).data)
449 }
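funcPC relies on the layout of func values: boxing f into an interface{} stores the func value (a pointer to a funcval) in the eface data word, and the first word of that funcval is the function's entry PC. A standalone sketch of the same extraction with package unsafe (it depends on unexported gc toolchain layout and is shown only to illustrate the assumption):

package main

import (
	"fmt"
	"unsafe"
)

func hello() {}

// entryPC mimics the layout assumption behind funcPC: boxing a func value in
// an interface{} stores a *funcval in the data word, and the first word of a
// funcval is the entry PC. Illustration only.
func entryPC(f interface{}) uintptr {
	type eface struct {
		_    unsafe.Pointer // type descriptor
		data unsafe.Pointer // the boxed func value
	}
	return *(*uintptr)((*eface)(unsafe.Pointer(&f)).data)
}

func main() {
	fmt.Printf("entry PC of hello: %#x\n", entryPC(hello))
}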
450
451
452 func badmcall(fn func(*g)) {
453 throw("runtime: mcall called on m->g0 stack")
454 }
455
456 func badmcall2(fn func(*g)) {
457 throw("runtime: mcall function returned")
458 }
459
460 func badreflectcall() {
461 panic(plainError("arg size to reflect.call more than 1GB"))
462 }
463
464 var badmorestackg0Msg = "fatal: morestack on g0\n"
465
466
467
468 func badmorestackg0() {
469 sp := stringStructOf(&badmorestackg0Msg)
470 write(2, sp.str, int32(sp.len))
471 }
472
473 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
474
475
476
477 func badmorestackgsignal() {
478 sp := stringStructOf(&badmorestackgsignalMsg)
479 write(2, sp.str, int32(sp.len))
480 }
481
482
483 func badctxt() {
484 throw("ctxt != 0")
485 }
486
487 func lockedOSThread() bool {
488 gp := getg()
489 return gp.lockedm != 0 && gp.m.lockedg != 0
490 }
491
492 var (
493
494
495
496
497
498
499 allglock mutex
500 allgs []*g
501
502
503
504
505
506
507
508
509
510
511
512
513
514 allglen uintptr
515 allgptr **g
516 )
517
518 func allgadd(gp *g) {
519 if readgstatus(gp) == _Gidle {
520 throw("allgadd: bad status Gidle")
521 }
522
523 lock(&allglock)
524 allgs = append(allgs, gp)
525 if &allgs[0] != allgptr {
526 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
527 }
528 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
529 unlock(&allglock)
530 }
531
532
533 func atomicAllG() (**g, uintptr) {
534 length := atomic.Loaduintptr(&allglen)
535 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
536 return ptr, length
537 }
538
539
540 func atomicAllGIndex(ptr **g, i uintptr) *g {
541 return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
542 }
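allgadd appends to allgs under allglock, but it also publishes the backing array pointer (allgptr) and then the length (allglen) with atomic stores, and atomicAllG loads the length before the pointer, so a lock-free reader never sees a length larger than the valid prefix of the array it loaded. A small self-contained sketch of that publish-pointer-then-length pattern (hypothetical names, using sync/atomic and unsafe):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

var (
	mu     sync.Mutex     // writers serialize, like allglock
	items  []*int         // written only under mu, like allgs
	itemsP unsafe.Pointer // atomically published &items[0], like allgptr
	itemsN uintptr        // atomically published len(items), like allglen
)

// add appends under the lock, then publishes the (possibly reallocated)
// backing array before the new length, so a concurrent reader can never
// observe a length that outruns the array it loaded.
func add(p *int) {
	mu.Lock()
	items = append(items, p)
	atomic.StorePointer(&itemsP, unsafe.Pointer(&items[0]))
	atomic.StoreUintptr(&itemsN, uintptr(len(items)))
	mu.Unlock()
}

// snapshot is the lock-free reader: the length is loaded before the pointer,
// mirroring atomicAllG.
func snapshot() (unsafe.Pointer, uintptr) {
	n := atomic.LoadUintptr(&itemsN)
	p := atomic.LoadPointer(&itemsP)
	return p, n
}

// index mirrors atomicAllGIndex: plain pointer arithmetic over the snapshot.
func index(p unsafe.Pointer, i uintptr) *int {
	return *(**int)(unsafe.Pointer(uintptr(p) + i*unsafe.Sizeof(p)))
}

func main() {
	x, y := 1, 2
	add(&x)
	add(&y)
	p, n := snapshot()
	for i := uintptr(0); i < n; i++ {
		fmt.Println(*index(p, i))
	}
}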
543
544 const (
545
546
547 _GoidCacheBatch = 16
548 )
549
550
551
552 func cpuinit() {
553 const prefix = "GODEBUG="
554 var env string
555
556 switch GOOS {
557 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
558 cpu.DebugOptions = true
559
560
561
562
563 n := int32(0)
564 for argv_index(argv, argc+1+n) != nil {
565 n++
566 }
567
568 for i := int32(0); i < n; i++ {
569 p := argv_index(argv, argc+1+i)
570 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
571
572 if hasPrefix(s, prefix) {
573 env = gostring(p)[len(prefix):]
574 break
575 }
576 }
577 }
578
579 cpu.Initialize(env)
580
581
582
583 x86HasPOPCNT = cpu.X86.HasPOPCNT
584 x86HasSSE41 = cpu.X86.HasSSE41
585 x86HasFMA = cpu.X86.HasFMA
586
587 armHasVFPv4 = cpu.ARM.HasVFPv4
588
589 arm64HasATOMICS = cpu.ARM64.HasATOMICS
590 }
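cpuinit cannot use the normal environment accessors yet, so on the listed OSes it walks the raw environ block that the kernel placed just past argv to find a GODEBUG= entry before calling cpu.Initialize. In ordinary Go code the equivalent lookup is just a prefix scan over os.Environ (a sketch, unrelated to the runtime's bootstrap constraints):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	const prefix = "GODEBUG="
	for _, kv := range os.Environ() {
		if strings.HasPrefix(kv, prefix) {
			fmt.Println("GODEBUG options:", kv[len(prefix):])
			return
		}
	}
	fmt.Println("GODEBUG not set")
}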
591
592
593
594
595
596
597
598
599
600 func schedinit() {
601 lockInit(&sched.lock, lockRankSched)
602 lockInit(&sched.sysmonlock, lockRankSysmon)
603 lockInit(&sched.deferlock, lockRankDefer)
604 lockInit(&sched.sudoglock, lockRankSudog)
605 lockInit(&deadlock, lockRankDeadlock)
606 lockInit(&paniclk, lockRankPanic)
607 lockInit(&allglock, lockRankAllg)
608 lockInit(&allpLock, lockRankAllp)
609 lockInit(&reflectOffs.lock, lockRankReflectOffs)
610 lockInit(&finlock, lockRankFin)
611 lockInit(&trace.bufLock, lockRankTraceBuf)
612 lockInit(&trace.stringsLock, lockRankTraceStrings)
613 lockInit(&trace.lock, lockRankTrace)
614 lockInit(&cpuprof.lock, lockRankCpuprof)
615 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
616
617
618
619 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
620
621
622
623 _g_ := getg()
624 if raceenabled {
625 _g_.racectx, raceprocctx0 = raceinit()
626 }
627
628 sched.maxmcount = 10000
629
630
631 worldStopped()
632
633 moduledataverify()
634 stackinit()
635 mallocinit()
636 fastrandinit()
637 mcommoninit(_g_.m, -1)
638 cpuinit()
639 alginit()
640 modulesinit()
641 typelinksinit()
642 itabsinit()
643
644 sigsave(&_g_.m.sigmask)
645 initSigmask = _g_.m.sigmask
646
647 goargs()
648 goenvs()
649 parsedebugvars()
650 gcinit()
651
652 lock(&sched.lock)
653 sched.lastpoll = uint64(nanotime())
654 procs := ncpu
655 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
656 procs = n
657 }
658 if procresize(procs) != nil {
659 throw("unknown runnable goroutine during bootstrap")
660 }
661 unlock(&sched.lock)
662
663
664 worldStarted()
665
666
667
668
669 if debug.cgocheck > 1 {
670 writeBarrier.cgo = true
671 writeBarrier.enabled = true
672 for _, p := range allp {
673 p.wbBuf.reset()
674 }
675 }
676
677 if buildVersion == "" {
678
679
680 buildVersion = "unknown"
681 }
682 if len(modinfo) == 1 {
683
684
685 modinfo = ""
686 }
687 }
688
689 func dumpgstatus(gp *g) {
690 _g_ := getg()
691 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
692 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
693 }
694
695
696 func checkmcount() {
697 assertLockHeld(&sched.lock)
698
699 if mcount() > sched.maxmcount {
700 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
701 throw("thread exhaustion")
702 }
703 }
704
705
706
707
708
709 func mReserveID() int64 {
710 assertLockHeld(&sched.lock)
711
712 if sched.mnext+1 < sched.mnext {
713 throw("runtime: thread ID overflow")
714 }
715 id := sched.mnext
716 sched.mnext++
717 checkmcount()
718 return id
719 }
720
721
722 func mcommoninit(mp *m, id int64) {
723 _g_ := getg()
724
725
726 if _g_ != _g_.m.g0 {
727 callers(1, mp.createstack[:])
728 }
729
730 lock(&sched.lock)
731
732 if id >= 0 {
733 mp.id = id
734 } else {
735 mp.id = mReserveID()
736 }
737
738 mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
739 mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
740 if mp.fastrand[0]|mp.fastrand[1] == 0 {
741 mp.fastrand[1] = 1
742 }
743
744 mpreinit(mp)
745 if mp.gsignal != nil {
746 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
747 }
748
749
750
751 mp.alllink = allm
752
753
754
755 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
756 unlock(&sched.lock)
757
758
759 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
760 mp.cgoCallers = new(cgoCallers)
761 }
762 }
763
764 var fastrandseed uintptr
765
766 func fastrandinit() {
767 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
768 getRandomData(s)
769 }
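fastrandinit seeds fastrand by viewing the fastrandseed word as a byte slice and filling it from the OS entropy source. The same reinterpretation in ordinary code, using crypto/rand in place of the runtime's getRandomData (illustrative sketch):

package main

import (
	"crypto/rand"
	"fmt"
	"unsafe"
)

func main() {
	var seed uintptr
	// View seed's storage as a byte slice and fill it with OS entropy,
	// the same reinterpretation fastrandinit applies to fastrandseed.
	buf := (*[unsafe.Sizeof(seed)]byte)(unsafe.Pointer(&seed))[:]
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("seed: %#x\n", seed)
}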
770
771
772 func ready(gp *g, traceskip int, next bool) {
773 if trace.enabled {
774 traceGoUnpark(gp, traceskip)
775 }
776
777 status := readgstatus(gp)
778
779
780 _g_ := getg()
781 mp := acquirem()
782 if status&^_Gscan != _Gwaiting {
783 dumpgstatus(gp)
784 throw("bad g->status in ready")
785 }
786
787
788 casgstatus(gp, _Gwaiting, _Grunnable)
789 runqput(_g_.m.p.ptr(), gp, next)
790 wakep()
791 releasem(mp)
792 }
793
794
795
796 const freezeStopWait = 0x7fffffff
797
798
799
800 var freezing uint32
801
802
803
804
805 func freezetheworld() {
806 atomic.Store(&freezing, 1)
807
808
809
810 for i := 0; i < 5; i++ {
811
812 sched.stopwait = freezeStopWait
813 atomic.Store(&sched.gcwaiting, 1)
814
815 if !preemptall() {
816 break
817 }
818 usleep(1000)
819 }
820
821 usleep(1000)
822 preemptall()
823 usleep(1000)
824 }
825
826
827
828
829 func readgstatus(gp *g) uint32 {
830 return atomic.Load(&gp.atomicstatus)
831 }
832
833
834
835
836
837 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
838 success := false
839
840
841 switch oldval {
842 default:
843 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
844 dumpgstatus(gp)
845 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
846 case _Gscanrunnable,
847 _Gscanwaiting,
848 _Gscanrunning,
849 _Gscansyscall,
850 _Gscanpreempted:
851 if newval == oldval&^_Gscan {
852 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
853 }
854 }
855 if !success {
856 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
857 dumpgstatus(gp)
858 throw("casfrom_Gscanstatus: gp->status is not in scan state")
859 }
860 releaseLockRank(lockRankGscan)
861 }
862
863
864
865 func castogscanstatus(gp *g, oldval, newval uint32) bool {
866 switch oldval {
867 case _Grunnable,
868 _Grunning,
869 _Gwaiting,
870 _Gsyscall:
871 if newval == oldval|_Gscan {
872 r := atomic.Cas(&gp.atomicstatus, oldval, newval)
873 if r {
874 acquireLockRank(lockRankGscan)
875 }
876 return r
877
878 }
879 }
880 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
881 throw("castogscanstatus")
882 panic("not reached")
883 }
884
885
886
887
888
889
890 func casgstatus(gp *g, oldval, newval uint32) {
891 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
892 systemstack(func() {
893 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
894 throw("casgstatus: bad incoming values")
895 })
896 }
897
898 acquireLockRank(lockRankGscan)
899 releaseLockRank(lockRankGscan)
900
901
902 const yieldDelay = 5 * 1000
903 var nextYield int64
904
905
906
907 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
908 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
909 throw("casgstatus: waiting for Gwaiting but is Grunnable")
910 }
911 if i == 0 {
912 nextYield = nanotime() + yieldDelay
913 }
914 if nanotime() < nextYield {
915 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
916 procyield(1)
917 }
918 } else {
919 osyield()
920 nextYield = nanotime() + yieldDelay/2
921 }
922 }
923 }
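When the status CAS fails (usually because a concurrent stack scan has set a transient _Gscan bit), casgstatus spins with procyield for up to yieldDelay (5000ns, i.e. 5µs) and then falls back to osyield, halving the budget on later rounds. A self-contained sketch of that spin-then-yield backoff around an atomic compare-and-swap (hypothetical helper; runtime.Gosched stands in for osyield):

package main

import (
	"runtime"
	"sync/atomic"
	"time"
)

// casWithBackoff retries a compare-and-swap, spinning cheaply at first and
// yielding (runtime.Gosched stands in for osyield) once a small time budget
// is used up, mirroring casgstatus's procyield/osyield split.
func casWithBackoff(addr *uint32, old, new uint32) {
	const yieldDelay = 5 * time.Microsecond
	var nextYield time.Time
	for i := 0; !atomic.CompareAndSwapUint32(addr, old, new); i++ {
		if i == 0 {
			nextYield = time.Now().Add(yieldDelay)
		}
		if time.Now().Before(nextYield) {
			// Cheap spin: re-check the value a few times before retrying the CAS.
			for x := 0; x < 10 && atomic.LoadUint32(addr) != old; x++ {
			}
		} else {
			runtime.Gosched()
			nextYield = time.Now().Add(yieldDelay / 2)
		}
	}
}

func main() {
	var status uint32 = 1
	go func() {
		// Briefly hold the value at 2 before restoring 1, much as a stack
		// scan transiently sets and clears a _Gscan bit.
		atomic.StoreUint32(&status, 2)
		atomic.StoreUint32(&status, 1)
	}()
	casWithBackoff(&status, 1, 3)
}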
924
925
926
927
928
929
930
931 func casgcopystack(gp *g) uint32 {
932 for {
933 oldstatus := readgstatus(gp) &^ _Gscan
934 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
935 throw("copystack: bad status, not Gwaiting or Grunnable")
936 }
937 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
938 return oldstatus
939 }
940 }
941 }
942
943
944
945
946
947 func casGToPreemptScan(gp *g, old, new uint32) {
948 if old != _Grunning || new != _Gscan|_Gpreempted {
949 throw("bad g transition")
950 }
951 acquireLockRank(lockRankGscan)
952 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
953 }
954 }
955
956
957
958
959 func casGFromPreempted(gp *g, old, new uint32) bool {
960 if old != _Gpreempted || new != _Gwaiting {
961 throw("bad g transition")
962 }
963 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
964 }
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980 func stopTheWorld(reason string) {
981 semacquire(&worldsema)
982 gp := getg()
983 gp.m.preemptoff = reason
984 systemstack(func() {
985
986
987
988
989
990
991
992
993
994
995 casgstatus(gp, _Grunning, _Gwaiting)
996 stopTheWorldWithSema()
997 casgstatus(gp, _Gwaiting, _Grunning)
998 })
999 }
1000
1001
1002 func startTheWorld() {
1003 systemstack(func() { startTheWorldWithSema(false) })
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020 mp := acquirem()
1021 mp.preemptoff = ""
1022 semrelease1(&worldsema, true, 0)
1023 releasem(mp)
1024 }
1025
1026
1027
1028
1029 func stopTheWorldGC(reason string) {
1030 semacquire(&gcsema)
1031 stopTheWorld(reason)
1032 }
1033
1034
1035 func startTheWorldGC() {
1036 startTheWorld()
1037 semrelease(&gcsema)
1038 }
1039
1040
1041 var worldsema uint32 = 1
1042
1043
1044
1045
1046
1047
1048
1049 var gcsema uint32 = 1
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073 func stopTheWorldWithSema() {
1074 _g_ := getg()
1075
1076
1077
1078 if _g_.m.locks > 0 {
1079 throw("stopTheWorld: holding locks")
1080 }
1081
1082 lock(&sched.lock)
1083 sched.stopwait = gomaxprocs
1084 atomic.Store(&sched.gcwaiting, 1)
1085 preemptall()
1086
1087 _g_.m.p.ptr().status = _Pgcstop
1088 sched.stopwait--
1089
1090 for _, p := range allp {
1091 s := p.status
1092 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1093 if trace.enabled {
1094 traceGoSysBlock(p)
1095 traceProcStop(p)
1096 }
1097 p.syscalltick++
1098 sched.stopwait--
1099 }
1100 }
1101
1102 for {
1103 p := pidleget()
1104 if p == nil {
1105 break
1106 }
1107 p.status = _Pgcstop
1108 sched.stopwait--
1109 }
1110 wait := sched.stopwait > 0
1111 unlock(&sched.lock)
1112
1113
1114 if wait {
1115 for {
1116
1117 if notetsleep(&sched.stopnote, 100*1000) {
1118 noteclear(&sched.stopnote)
1119 break
1120 }
1121 preemptall()
1122 }
1123 }
1124
1125
1126 bad := ""
1127 if sched.stopwait != 0 {
1128 bad = "stopTheWorld: not stopped (stopwait != 0)"
1129 } else {
1130 for _, p := range allp {
1131 if p.status != _Pgcstop {
1132 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1133 }
1134 }
1135 }
1136 if atomic.Load(&freezing) != 0 {
1137
1138
1139
1140
1141 lock(&deadlock)
1142 lock(&deadlock)
1143 }
1144 if bad != "" {
1145 throw(bad)
1146 }
1147
1148 worldStopped()
1149 }
1150
1151 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1152 assertWorldStopped()
1153
1154 mp := acquirem()
1155 if netpollinited() {
1156 list := netpoll(0)
1157 injectglist(&list)
1158 }
1159 lock(&sched.lock)
1160
1161 procs := gomaxprocs
1162 if newprocs != 0 {
1163 procs = newprocs
1164 newprocs = 0
1165 }
1166 p1 := procresize(procs)
1167 sched.gcwaiting = 0
1168 if sched.sysmonwait != 0 {
1169 sched.sysmonwait = 0
1170 notewakeup(&sched.sysmonnote)
1171 }
1172 unlock(&sched.lock)
1173
1174 worldStarted()
1175
1176 for p1 != nil {
1177 p := p1
1178 p1 = p1.link.ptr()
1179 if p.m != 0 {
1180 mp := p.m.ptr()
1181 p.m = 0
1182 if mp.nextp != 0 {
1183 throw("startTheWorld: inconsistent mp->nextp")
1184 }
1185 mp.nextp.set(p)
1186 notewakeup(&mp.park)
1187 } else {
1188
1189 newm(nil, p, -1)
1190 }
1191 }
1192
1193
1194 startTime := nanotime()
1195 if emitTraceEvent {
1196 traceGCSTWDone()
1197 }
1198
1199
1200
1201
1202 wakep()
1203
1204 releasem(mp)
1205
1206 return startTime
1207 }
1208
1209
1210
1211 func usesLibcall() bool {
1212 switch GOOS {
1213 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1214 return true
1215 case "openbsd":
1216 return GOARCH == "amd64" || GOARCH == "arm64"
1217 }
1218 return false
1219 }
1220
1221
1222
1223 func mStackIsSystemAllocated() bool {
1224 switch GOOS {
1225 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1226 return true
1227 case "openbsd":
1228 switch GOARCH {
1229 case "amd64", "arm64":
1230 return true
1231 }
1232 }
1233 return false
1234 }
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246 func mstart() {
1247 _g_ := getg()
1248
1249 osStack := _g_.stack.lo == 0
1250 if osStack {
1251
1252
1253
1254
1255
1256
1257
1258
1259 size := _g_.stack.hi
1260 if size == 0 {
1261 size = 8192 * sys.StackGuardMultiplier
1262 }
1263 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1264 _g_.stack.lo = _g_.stack.hi - size + 1024
1265 }
1266
1267
1268 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1269
1270
1271 _g_.stackguard1 = _g_.stackguard0
1272 mstart1()
1273
1274
1275 if mStackIsSystemAllocated() {
1276
1277
1278
1279 osStack = true
1280 }
1281 mexit(osStack)
1282 }
1283
1284 func mstart1() {
1285 _g_ := getg()
1286
1287 if _g_ != _g_.m.g0 {
1288 throw("bad runtime·mstart")
1289 }
1290
1291
1292
1293
1294
1295 save(getcallerpc(), getcallersp())
1296 asminit()
1297 minit()
1298
1299
1300
1301 if _g_.m == &m0 {
1302 mstartm0()
1303 }
1304
1305 if fn := _g_.m.mstartfn; fn != nil {
1306 fn()
1307 }
1308
1309 if _g_.m != &m0 {
1310 acquirep(_g_.m.nextp.ptr())
1311 _g_.m.nextp = 0
1312 }
1313 schedule()
1314 }
1315
1316
1317
1318
1319
1320
1321
1322 func mstartm0() {
1323
1324
1325
1326 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1327 cgoHasExtraM = true
1328 newextram()
1329 }
1330 initsig(false)
1331 }
1332
1333
1334
1335
1336
1337 func mPark() {
1338 g := getg()
1339 for {
1340 notesleep(&g.m.park)
1341 noteclear(&g.m.park)
1342 if !mDoFixup() {
1343 return
1344 }
1345 }
1346 }
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358 func mexit(osStack bool) {
1359 g := getg()
1360 m := g.m
1361
1362 if m == &m0 {
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374 handoffp(releasep())
1375 lock(&sched.lock)
1376 sched.nmfreed++
1377 checkdead()
1378 unlock(&sched.lock)
1379 mPark()
1380 throw("locked m0 woke up")
1381 }
1382
1383 sigblock(true)
1384 unminit()
1385
1386
1387 if m.gsignal != nil {
1388 stackfree(m.gsignal.stack)
1389
1390
1391
1392
1393 m.gsignal = nil
1394 }
1395
1396
1397 lock(&sched.lock)
1398 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1399 if *pprev == m {
1400 *pprev = m.alllink
1401 goto found
1402 }
1403 }
1404 throw("m not found in allm")
1405 found:
1406 if !osStack {
1407
1408
1409
1410
1411 atomic.Store(&m.freeWait, 1)
1412
1413
1414
1415
1416 m.freelink = sched.freem
1417 sched.freem = m
1418 }
1419 unlock(&sched.lock)
1420
1421
1422 handoffp(releasep())
1423
1424
1425
1426
1427
1428 lock(&sched.lock)
1429 sched.nmfreed++
1430 checkdead()
1431 unlock(&sched.lock)
1432
1433 if GOOS == "darwin" || GOOS == "ios" {
1434
1435
1436 if atomic.Load(&m.signalPending) != 0 {
1437 atomic.Xadd(&pendingPreemptSignals, -1)
1438 }
1439 }
1440
1441
1442
1443 mdestroy(m)
1444
1445 if osStack {
1446
1447
1448 return
1449 }
1450
1451
1452
1453
1454
1455 exitThread(&m.freeWait)
1456 }
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469 func forEachP(fn func(*p)) {
1470 mp := acquirem()
1471 _p_ := getg().m.p.ptr()
1472
1473 lock(&sched.lock)
1474 if sched.safePointWait != 0 {
1475 throw("forEachP: sched.safePointWait != 0")
1476 }
1477 sched.safePointWait = gomaxprocs - 1
1478 sched.safePointFn = fn
1479
1480
1481 for _, p := range allp {
1482 if p != _p_ {
1483 atomic.Store(&p.runSafePointFn, 1)
1484 }
1485 }
1486 preemptall()
1487
1488
1489
1490
1491
1492
1493
1494 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1495 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1496 fn(p)
1497 sched.safePointWait--
1498 }
1499 }
1500
1501 wait := sched.safePointWait > 0
1502 unlock(&sched.lock)
1503
1504
1505 fn(_p_)
1506
1507
1508
1509 for _, p := range allp {
1510 s := p.status
1511 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1512 if trace.enabled {
1513 traceGoSysBlock(p)
1514 traceProcStop(p)
1515 }
1516 p.syscalltick++
1517 handoffp(p)
1518 }
1519 }
1520
1521
1522 if wait {
1523 for {
1524
1525
1526
1527
1528 if notetsleep(&sched.safePointNote, 100*1000) {
1529 noteclear(&sched.safePointNote)
1530 break
1531 }
1532 preemptall()
1533 }
1534 }
1535 if sched.safePointWait != 0 {
1536 throw("forEachP: not done")
1537 }
1538 for _, p := range allp {
1539 if p.runSafePointFn != 0 {
1540 throw("forEachP: P did not run fn")
1541 }
1542 }
1543
1544 lock(&sched.lock)
1545 sched.safePointFn = nil
1546 unlock(&sched.lock)
1547 releasem(mp)
1548 }
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564 func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
1565 if iscgo {
1566 panic("doAllThreadsSyscall not supported with cgo enabled")
1567 }
1568 if fn == nil {
1569 return
1570 }
1571 for atomic.Load(&sched.sysmonStarting) != 0 {
1572 osyield()
1573 }
1574 stopTheWorldGC("doAllThreadsSyscall")
1575 if atomic.Load(&newmHandoff.haveTemplateThread) != 0 {
1576
1577
1578 lock(&newmHandoff.lock)
1579 for !newmHandoff.waiting {
1580 unlock(&newmHandoff.lock)
1581 osyield()
1582 lock(&newmHandoff.lock)
1583 }
1584 unlock(&newmHandoff.lock)
1585 }
1586 if netpollinited() {
1587 netpollBreak()
1588 }
1589 sigRecvPrepareForFixup()
1590 _g_ := getg()
1591 if raceenabled {
1592
1593
1594 lock(&mFixupRace.lock)
1595 mFixupRace.ctx = _g_.racectx
1596 unlock(&mFixupRace.lock)
1597 }
1598 if ok := fn(true); ok {
1599 tid := _g_.m.procid
1600 for mp := allm; mp != nil; mp = mp.alllink {
1601 if mp.procid == tid {
1602
1603
1604 continue
1605 }
1606
1607
1608
1609
1610
1611
1612
1613 if mp.procid == 0 && !mp.doesPark {
1614
1615
1616
1617
1618 throw("unsupported runtime environment")
1619 }
1620
1621
1622
1623 lock(&mp.mFixup.lock)
1624 mp.mFixup.fn = fn
1625 if mp.doesPark {
1626
1627
1628
1629
1630
1631 notewakeup(&mp.park)
1632 }
1633 unlock(&mp.mFixup.lock)
1634 }
1635 for {
1636 done := true
1637 for mp := allm; done && mp != nil; mp = mp.alllink {
1638 if mp.procid == tid {
1639 continue
1640 }
1641 lock(&mp.mFixup.lock)
1642 done = done && (mp.mFixup.fn == nil)
1643 unlock(&mp.mFixup.lock)
1644 }
1645 if done {
1646 break
1647 }
1648
1649 lock(&sched.lock)
1650 if atomic.Load(&sched.sysmonwait) != 0 {
1651 atomic.Store(&sched.sysmonwait, 0)
1652 notewakeup(&sched.sysmonnote)
1653 }
1654 unlock(&sched.lock)
1655 lock(&newmHandoff.lock)
1656 if newmHandoff.waiting {
1657 newmHandoff.waiting = false
1658 notewakeup(&newmHandoff.wake)
1659 }
1660 unlock(&newmHandoff.lock)
1661 osyield()
1662 }
1663 }
1664 if raceenabled {
1665 lock(&mFixupRace.lock)
1666 mFixupRace.ctx = 0
1667 unlock(&mFixupRace.lock)
1668 }
1669 startTheWorldGC()
1670 }
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683 func runSafePointFn() {
1684 p := getg().m.p.ptr()
1685
1686
1687
1688 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1689 return
1690 }
1691 sched.safePointFn(p)
1692 lock(&sched.lock)
1693 sched.safePointWait--
1694 if sched.safePointWait == 0 {
1695 notewakeup(&sched.safePointNote)
1696 }
1697 unlock(&sched.lock)
1698 }
1699
1700
1701
1702
1703 var cgoThreadStart unsafe.Pointer
1704
1705 type cgothreadstart struct {
1706 g guintptr
1707 tls *uint64
1708 fn unsafe.Pointer
1709 }
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720 func allocm(_p_ *p, fn func(), id int64) *m {
1721 _g_ := getg()
1722 acquirem()
1723 if _g_.m.p == 0 {
1724 acquirep(_p_)
1725 }
1726
1727
1728
1729 if sched.freem != nil {
1730 lock(&sched.lock)
1731 var newList *m
1732 for freem := sched.freem; freem != nil; {
1733 if freem.freeWait != 0 {
1734 next := freem.freelink
1735 freem.freelink = newList
1736 newList = freem
1737 freem = next
1738 continue
1739 }
1740
1741
1742
1743 systemstack(func() {
1744 stackfree(freem.g0.stack)
1745 })
1746 freem = freem.freelink
1747 }
1748 sched.freem = newList
1749 unlock(&sched.lock)
1750 }
1751
1752 mp := new(m)
1753 mp.mstartfn = fn
1754 mcommoninit(mp, id)
1755
1756
1757
1758 if iscgo || mStackIsSystemAllocated() {
1759 mp.g0 = malg(-1)
1760 } else {
1761 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1762 }
1763 mp.g0.m = mp
1764
1765 if _p_ == _g_.m.p.ptr() {
1766 releasep()
1767 }
1768 releasem(_g_.m)
1769
1770 return mp
1771 }
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807 func needm() {
1808 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1809
1810
1811
1812
1813
1814
1815 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1816 exit(1)
1817 }
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827 var sigmask sigset
1828 sigsave(&sigmask)
1829 sigblock(false)
1830
1831
1832
1833
1834
1835 mp := lockextra(false)
1836
1837
1838
1839
1840
1841
1842
1843
1844 mp.needextram = mp.schedlink == 0
1845 extraMCount--
1846 unlockextra(mp.schedlink.ptr())
1847
1848
1849 mp.sigmask = sigmask
1850
1851
1852
1853
1854
1855
1856 setg(mp.g0)
1857 _g_ := getg()
1858 _g_.stack.hi = getcallersp() + 1024
1859 _g_.stack.lo = getcallersp() - 32*1024
1860 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1861
1862
1863 asminit()
1864 minit()
1865
1866
1867 casgstatus(mp.curg, _Gdead, _Gsyscall)
1868 atomic.Xadd(&sched.ngsys, -1)
1869 }
1870
1871 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1872
1873
1874
1875
1876 func newextram() {
1877 c := atomic.Xchg(&extraMWaiters, 0)
1878 if c > 0 {
1879 for i := uint32(0); i < c; i++ {
1880 oneNewExtraM()
1881 }
1882 } else {
1883
1884 mp := lockextra(true)
1885 unlockextra(mp)
1886 if mp == nil {
1887 oneNewExtraM()
1888 }
1889 }
1890 }
1891
1892
1893 func oneNewExtraM() {
1894
1895
1896
1897
1898
1899 mp := allocm(nil, nil, -1)
1900 gp := malg(4096)
1901 gp.sched.pc = funcPC(goexit) + sys.PCQuantum
1902 gp.sched.sp = gp.stack.hi
1903 gp.sched.sp -= 4 * sys.RegSize
1904 gp.sched.lr = 0
1905 gp.sched.g = guintptr(unsafe.Pointer(gp))
1906 gp.syscallpc = gp.sched.pc
1907 gp.syscallsp = gp.sched.sp
1908 gp.stktopsp = gp.sched.sp
1909
1910
1911
1912
1913 casgstatus(gp, _Gidle, _Gdead)
1914 gp.m = mp
1915 mp.curg = gp
1916 mp.lockedInt++
1917 mp.lockedg.set(gp)
1918 gp.lockedm.set(mp)
1919 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1920 if raceenabled {
1921 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
1922 }
1923
1924 allgadd(gp)
1925
1926
1927
1928
1929
1930 atomic.Xadd(&sched.ngsys, +1)
1931
1932
1933 mnext := lockextra(true)
1934 mp.schedlink.set(mnext)
1935 extraMCount++
1936 unlockextra(mp)
1937 }
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962 func dropm() {
1963
1964
1965
1966 mp := getg().m
1967
1968
1969 casgstatus(mp.curg, _Gsyscall, _Gdead)
1970 mp.curg.preemptStop = false
1971 atomic.Xadd(&sched.ngsys, +1)
1972
1973
1974
1975
1976
1977 sigmask := mp.sigmask
1978 sigblock(false)
1979 unminit()
1980
1981 mnext := lockextra(true)
1982 extraMCount++
1983 mp.schedlink.set(mnext)
1984
1985 setg(nil)
1986
1987
1988 unlockextra(mp)
1989
1990 msigrestore(sigmask)
1991 }
1992
1993
1994 func getm() uintptr {
1995 return uintptr(unsafe.Pointer(getg().m))
1996 }
1997
1998 var extram uintptr
1999 var extraMCount uint32
2000 var extraMWaiters uint32
2001
2002
2003
2004
2005
2006
2007
2008 func lockextra(nilokay bool) *m {
2009 const locked = 1
2010
2011 incr := false
2012 for {
2013 old := atomic.Loaduintptr(&extram)
2014 if old == locked {
2015 osyield()
2016 continue
2017 }
2018 if old == 0 && !nilokay {
2019 if !incr {
2020
2021
2022
2023 atomic.Xadd(&extraMWaiters, 1)
2024 incr = true
2025 }
2026 usleep(1)
2027 continue
2028 }
2029 if atomic.Casuintptr(&extram, old, locked) {
2030 return (*m)(unsafe.Pointer(old))
2031 }
2032 osyield()
2033 continue
2034 }
2035 }
2036
2037
2038 func unlockextra(mp *m) {
2039 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
2040 }
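lockextra and unlockextra make the single word extram serve as both a lock and a list head: 0 means the list is empty, the sentinel value 1 means some thread holds the lock, and any other value is the address of the first extra M. A minimal standalone sketch of that sentinel-word spin lock (hypothetical list type; note that hiding a Go pointer in a uintptr is only safe for the runtime because it manages these objects itself, so the sketch keeps a side reference alive):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"unsafe"
)

type item struct {
	val  int
	next *item
}

// head is 0 when the list is empty, 1 while a caller holds the lock, and
// otherwise the address of the first item, mirroring extram.
var head uintptr

const locked = 1

// keep holds real pointers to every item so the garbage collector still sees
// them; the runtime gets away without this because it never frees its Ms.
var keep []*item

func lockList() *item {
	for {
		old := atomic.LoadUintptr(&head)
		if old == locked {
			runtime.Gosched() // stand-in for osyield
			continue
		}
		if atomic.CompareAndSwapUintptr(&head, old, locked) {
			return (*item)(unsafe.Pointer(old)) // nil if the list was empty
		}
	}
}

func unlockList(it *item) {
	atomic.StoreUintptr(&head, uintptr(unsafe.Pointer(it)))
}

func main() {
	top := lockList() // acquires the lock; the list starts empty, so top == nil
	it := &item{val: 7, next: top}
	keep = append(keep, it)
	unlockList(it) // push the new head and release the lock

	top = lockList()
	fmt.Println(top.val)
	unlockList(top.next)
}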
2041
2042
2043
2044 var execLock rwmutex
2045
2046
2047
2048
2049 var newmHandoff struct {
2050 lock mutex
2051
2052
2053
2054 newm muintptr
2055
2056
2057
2058 waiting bool
2059 wake note
2060
2061
2062
2063
2064 haveTemplateThread uint32
2065 }
2066
2067
2068
2069
2070
2071
2072
2073 func newm(fn func(), _p_ *p, id int64) {
2074 mp := allocm(_p_, fn, id)
2075 mp.doesPark = (_p_ != nil)
2076 mp.nextp.set(_p_)
2077 mp.sigmask = initSigmask
2078 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090 lock(&newmHandoff.lock)
2091 if newmHandoff.haveTemplateThread == 0 {
2092 throw("on a locked thread with no template thread")
2093 }
2094 mp.schedlink = newmHandoff.newm
2095 newmHandoff.newm.set(mp)
2096 if newmHandoff.waiting {
2097 newmHandoff.waiting = false
2098 notewakeup(&newmHandoff.wake)
2099 }
2100 unlock(&newmHandoff.lock)
2101 return
2102 }
2103 newm1(mp)
2104 }
2105
2106 func newm1(mp *m) {
2107 if iscgo {
2108 var ts cgothreadstart
2109 if _cgo_thread_start == nil {
2110 throw("_cgo_thread_start missing")
2111 }
2112 ts.g.set(mp.g0)
2113 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2114 ts.fn = unsafe.Pointer(funcPC(mstart))
2115 if msanenabled {
2116 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2117 }
2118 execLock.rlock()
2119 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2120 execLock.runlock()
2121 return
2122 }
2123 execLock.rlock()
2124 newosproc(mp)
2125 execLock.runlock()
2126 }
2127
2128
2129
2130
2131
2132 func startTemplateThread() {
2133 if GOARCH == "wasm" {
2134 return
2135 }
2136
2137
2138
2139 mp := acquirem()
2140 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2141 releasem(mp)
2142 return
2143 }
2144 newm(templateThread, nil, -1)
2145 releasem(mp)
2146 }
2147
2148
2149
2150
2151
2152 var mFixupRace struct {
2153 lock mutex
2154 ctx uintptr
2155 }
2156
2157
2158
2159
2160
2161 func mDoFixup() bool {
2162 _g_ := getg()
2163 lock(&_g_.m.mFixup.lock)
2164 fn := _g_.m.mFixup.fn
2165 if fn != nil {
2166 if gcphase != _GCoff {
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177 throw("GC must be disabled to protect validity of fn value")
2178 }
2179 *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
2180 if _g_.racectx != 0 || !raceenabled {
2181 fn(false)
2182 } else {
2183
2184
2185
2186
2187
2188 lock(&mFixupRace.lock)
2189 _g_.racectx = mFixupRace.ctx
2190 fn(false)
2191 _g_.racectx = 0
2192 unlock(&mFixupRace.lock)
2193 }
2194 }
2195 unlock(&_g_.m.mFixup.lock)
2196 return fn != nil
2197 }
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211 func templateThread() {
2212 lock(&sched.lock)
2213 sched.nmsys++
2214 checkdead()
2215 unlock(&sched.lock)
2216
2217 for {
2218 lock(&newmHandoff.lock)
2219 for newmHandoff.newm != 0 {
2220 newm := newmHandoff.newm.ptr()
2221 newmHandoff.newm = 0
2222 unlock(&newmHandoff.lock)
2223 for newm != nil {
2224 next := newm.schedlink.ptr()
2225 newm.schedlink = 0
2226 newm1(newm)
2227 newm = next
2228 }
2229 lock(&newmHandoff.lock)
2230 }
2231 newmHandoff.waiting = true
2232 noteclear(&newmHandoff.wake)
2233 unlock(&newmHandoff.lock)
2234 notesleep(&newmHandoff.wake)
2235 mDoFixup()
2236 }
2237 }
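templateThread is a dedicated known-good thread that other code hands thread-creation work to through newmHandoff: producers queue work under a lock and wake the template thread if it is parked, and the thread drains the queue and parks again. A compact standalone sketch of that hand-off loop (hypothetical types; a buffered channel stands in for the runtime note):

package main

import (
	"fmt"
	"sync"
)

// handoff models newmHandoff: producers that must not block for long push
// work onto a list under a lock and, if the worker is parked, wake it; a
// single dedicated worker drains the list in a loop.
type handoff struct {
	mu      sync.Mutex
	pending []string
	waiting bool
	wake    chan struct{} // plays the role of the runtime note
}

func (h *handoff) push(job string) {
	h.mu.Lock()
	h.pending = append(h.pending, job)
	if h.waiting {
		h.waiting = false
		h.mu.Unlock()
		h.wake <- struct{}{}
		return
	}
	h.mu.Unlock()
}

func (h *handoff) worker(done chan<- struct{}) {
	for {
		h.mu.Lock()
		for len(h.pending) > 0 {
			jobs := h.pending
			h.pending = nil
			h.mu.Unlock()
			for _, j := range jobs {
				if j == "quit" {
					close(done)
					return
				}
				fmt.Println("handled", j)
			}
			h.mu.Lock()
		}
		h.waiting = true
		h.mu.Unlock()
		<-h.wake // park until a producer wakes us
	}
}

func main() {
	h := &handoff{wake: make(chan struct{}, 1)}
	done := make(chan struct{})
	go h.worker(done)
	h.push("job-1")
	h.push("quit")
	<-done
}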
2238
2239
2240
2241 func stopm() {
2242 _g_ := getg()
2243
2244 if _g_.m.locks != 0 {
2245 throw("stopm holding locks")
2246 }
2247 if _g_.m.p != 0 {
2248 throw("stopm holding p")
2249 }
2250 if _g_.m.spinning {
2251 throw("stopm spinning")
2252 }
2253
2254 lock(&sched.lock)
2255 mput(_g_.m)
2256 unlock(&sched.lock)
2257 mPark()
2258 acquirep(_g_.m.nextp.ptr())
2259 _g_.m.nextp = 0
2260 }
2261
2262 func mspinning() {
2263
2264 getg().m.spinning = true
2265 }
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278 func startm(_p_ *p, spinning bool) {
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295 mp := acquirem()
2296 lock(&sched.lock)
2297 if _p_ == nil {
2298 _p_ = pidleget()
2299 if _p_ == nil {
2300 unlock(&sched.lock)
2301 if spinning {
2302
2303
2304 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2305 throw("startm: negative nmspinning")
2306 }
2307 }
2308 releasem(mp)
2309 return
2310 }
2311 }
2312 nmp := mget()
2313 if nmp == nil {
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326 id := mReserveID()
2327 unlock(&sched.lock)
2328
2329 var fn func()
2330 if spinning {
2331
2332 fn = mspinning
2333 }
2334 newm(fn, _p_, id)
2335
2336
2337 releasem(mp)
2338 return
2339 }
2340 unlock(&sched.lock)
2341 if nmp.spinning {
2342 throw("startm: m is spinning")
2343 }
2344 if nmp.nextp != 0 {
2345 throw("startm: m has p")
2346 }
2347 if spinning && !runqempty(_p_) {
2348 throw("startm: p has runnable gs")
2349 }
2350
2351 nmp.spinning = spinning
2352 nmp.nextp.set(_p_)
2353 notewakeup(&nmp.park)
2354
2355
2356 releasem(mp)
2357 }
2358
2359
2360
2361
2362 func handoffp(_p_ *p) {
2363
2364
2365
2366
2367 if !runqempty(_p_) || sched.runqsize != 0 {
2368 startm(_p_, false)
2369 return
2370 }
2371
2372 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2373 startm(_p_, false)
2374 return
2375 }
2376
2377
2378 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2379 startm(_p_, true)
2380 return
2381 }
2382 lock(&sched.lock)
2383 if sched.gcwaiting != 0 {
2384 _p_.status = _Pgcstop
2385 sched.stopwait--
2386 if sched.stopwait == 0 {
2387 notewakeup(&sched.stopnote)
2388 }
2389 unlock(&sched.lock)
2390 return
2391 }
2392 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2393 sched.safePointFn(_p_)
2394 sched.safePointWait--
2395 if sched.safePointWait == 0 {
2396 notewakeup(&sched.safePointNote)
2397 }
2398 }
2399 if sched.runqsize != 0 {
2400 unlock(&sched.lock)
2401 startm(_p_, false)
2402 return
2403 }
2404
2405
2406 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2407 unlock(&sched.lock)
2408 startm(_p_, false)
2409 return
2410 }
2411
2412
2413
2414 when := nobarrierWakeTime(_p_)
2415 pidleput(_p_)
2416 unlock(&sched.lock)
2417
2418 if when != 0 {
2419 wakeNetPoller(when)
2420 }
2421 }
2422
2423
2424
2425 func wakep() {
2426 if atomic.Load(&sched.npidle) == 0 {
2427 return
2428 }
2429
2430 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
2431 return
2432 }
2433 startm(nil, true)
2434 }
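wakep starts at most one extra spinning M: it returns immediately unless some P is idle, and the 0→1 compare-and-swap on nmspinning ensures concurrent callers do not all spawn spinners at once (startm and resetspinning adjust the counter afterwards). A compact sketch of that single-waker guard (hypothetical names):

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	idleWorkers int32 // like sched.npidle
	spinning    int32 // like sched.nmspinning
)

// maybeWake starts at most one extra worker: it is a no-op when nobody is
// idle, and the CAS ensures concurrent callers cannot all start workers.
func maybeWake(start func()) {
	if atomic.LoadInt32(&idleWorkers) == 0 {
		return
	}
	if atomic.LoadInt32(&spinning) != 0 || !atomic.CompareAndSwapInt32(&spinning, 0, 1) {
		return
	}
	start()
}

func main() {
	atomic.StoreInt32(&idleWorkers, 1)
	maybeWake(func() { fmt.Println("started a spinning worker") })
	maybeWake(func() { fmt.Println("not reached; a spinner already exists") })
}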
2435
2436
2437
2438 func stoplockedm() {
2439 _g_ := getg()
2440
2441 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2442 throw("stoplockedm: inconsistent locking")
2443 }
2444 if _g_.m.p != 0 {
2445
2446 _p_ := releasep()
2447 handoffp(_p_)
2448 }
2449 incidlelocked(1)
2450
2451 mPark()
2452 status := readgstatus(_g_.m.lockedg.ptr())
2453 if status&^_Gscan != _Grunnable {
2454 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2455 dumpgstatus(_g_.m.lockedg.ptr())
2456 throw("stoplockedm: not runnable")
2457 }
2458 acquirep(_g_.m.nextp.ptr())
2459 _g_.m.nextp = 0
2460 }
2461
2462
2463
2464
2465 func startlockedm(gp *g) {
2466 _g_ := getg()
2467
2468 mp := gp.lockedm.ptr()
2469 if mp == _g_.m {
2470 throw("startlockedm: locked to me")
2471 }
2472 if mp.nextp != 0 {
2473 throw("startlockedm: m has p")
2474 }
2475
2476 incidlelocked(-1)
2477 _p_ := releasep()
2478 mp.nextp.set(_p_)
2479 notewakeup(&mp.park)
2480 stopm()
2481 }
2482
2483
2484
2485 func gcstopm() {
2486 _g_ := getg()
2487
2488 if sched.gcwaiting == 0 {
2489 throw("gcstopm: not waiting for gc")
2490 }
2491 if _g_.m.spinning {
2492 _g_.m.spinning = false
2493
2494
2495 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2496 throw("gcstopm: negative nmspinning")
2497 }
2498 }
2499 _p_ := releasep()
2500 lock(&sched.lock)
2501 _p_.status = _Pgcstop
2502 sched.stopwait--
2503 if sched.stopwait == 0 {
2504 notewakeup(&sched.stopnote)
2505 }
2506 unlock(&sched.lock)
2507 stopm()
2508 }
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519 func execute(gp *g, inheritTime bool) {
2520 _g_ := getg()
2521
2522
2523
2524 _g_.m.curg = gp
2525 gp.m = _g_.m
2526 casgstatus(gp, _Grunnable, _Grunning)
2527 gp.waitsince = 0
2528 gp.preempt = false
2529 gp.stackguard0 = gp.stack.lo + _StackGuard
2530 if !inheritTime {
2531 _g_.m.p.ptr().schedtick++
2532 }
2533
2534
2535 hz := sched.profilehz
2536 if _g_.m.profilehz != hz {
2537 setThreadCPUProfiler(hz)
2538 }
2539
2540 if trace.enabled {
2541
2542
2543 if gp.syscallsp != 0 && gp.sysblocktraced {
2544 traceGoSysExit(gp.sysexitticks)
2545 }
2546 traceGoStart()
2547 }
2548
2549 gogo(&gp.sched)
2550 }
2551
2552
2553
2554 func findrunnable() (gp *g, inheritTime bool) {
2555 _g_ := getg()
2556
2557
2558
2559
2560
2561 top:
2562 _p_ := _g_.m.p.ptr()
2563 if sched.gcwaiting != 0 {
2564 gcstopm()
2565 goto top
2566 }
2567 if _p_.runSafePointFn != 0 {
2568 runSafePointFn()
2569 }
2570
2571 now, pollUntil, _ := checkTimers(_p_, 0)
2572
2573 if fingwait && fingwake {
2574 if gp := wakefing(); gp != nil {
2575 ready(gp, 0, true)
2576 }
2577 }
2578 if *cgo_yield != nil {
2579 asmcgocall(*cgo_yield, nil)
2580 }
2581
2582
2583 if gp, inheritTime := runqget(_p_); gp != nil {
2584 return gp, inheritTime
2585 }
2586
2587
2588 if sched.runqsize != 0 {
2589 lock(&sched.lock)
2590 gp := globrunqget(_p_, 0)
2591 unlock(&sched.lock)
2592 if gp != nil {
2593 return gp, false
2594 }
2595 }
2596
2597
2598
2599
2600
2601
2602
2603
2604 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2605 if list := netpoll(0); !list.empty() {
2606 gp := list.pop()
2607 injectglist(&list)
2608 casgstatus(gp, _Gwaiting, _Grunnable)
2609 if trace.enabled {
2610 traceGoUnpark(gp, 0)
2611 }
2612 return gp, false
2613 }
2614 }
2615
2616
2617 procs := uint32(gomaxprocs)
2618 ranTimer := false
2619
2620
2621
2622 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2623 goto stop
2624 }
2625 if !_g_.m.spinning {
2626 _g_.m.spinning = true
2627 atomic.Xadd(&sched.nmspinning, 1)
2628 }
2629 const stealTries = 4
2630 for i := 0; i < stealTries; i++ {
2631 stealTimersOrRunNextG := i == stealTries-1
2632
2633 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2634 if sched.gcwaiting != 0 {
2635 goto top
2636 }
2637 p2 := allp[enum.position()]
2638 if _p_ == p2 {
2639 continue
2640 }
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
2656 tnow, w, ran := checkTimers(p2, now)
2657 now = tnow
2658 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2659 pollUntil = w
2660 }
2661 if ran {
2662
2663
2664
2665
2666
2667
2668
2669
2670 if gp, inheritTime := runqget(_p_); gp != nil {
2671 return gp, inheritTime
2672 }
2673 ranTimer = true
2674 }
2675 }
2676
2677
2678 if !idlepMask.read(enum.position()) {
2679 if gp := runqsteal(_p_, p2, stealTimersOrRunNextG); gp != nil {
2680 return gp, false
2681 }
2682 }
2683 }
2684 }
2685 if ranTimer {
2686
2687 goto top
2688 }
2689
2690 stop:
2691
2692
2693
2694
2695 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2696 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2697 if node != nil {
2698 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2699 gp := node.gp.ptr()
2700 casgstatus(gp, _Gwaiting, _Grunnable)
2701 if trace.enabled {
2702 traceGoUnpark(gp, 0)
2703 }
2704 return gp, false
2705 }
2706 }
2707
2708 delta := int64(-1)
2709 if pollUntil != 0 {
2710
2711 delta = pollUntil - now
2712 }
2713
2714
2715
2716
2717
2718 gp, otherReady := beforeIdle(delta)
2719 if gp != nil {
2720 casgstatus(gp, _Gwaiting, _Grunnable)
2721 if trace.enabled {
2722 traceGoUnpark(gp, 0)
2723 }
2724 return gp, false
2725 }
2726 if otherReady {
2727 goto top
2728 }
2729
2730
2731
2732
2733
2734 allpSnapshot := allp
2735
2736
2737 idlepMaskSnapshot := idlepMask
2738 timerpMaskSnapshot := timerpMask
2739
2740
2741 lock(&sched.lock)
2742 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2743 unlock(&sched.lock)
2744 goto top
2745 }
2746 if sched.runqsize != 0 {
2747 gp := globrunqget(_p_, 0)
2748 unlock(&sched.lock)
2749 return gp, false
2750 }
2751 if releasep() != _p_ {
2752 throw("findrunnable: wrong p")
2753 }
2754 pidleput(_p_)
2755 unlock(&sched.lock)
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770 wasSpinning := _g_.m.spinning
2771 if _g_.m.spinning {
2772 _g_.m.spinning = false
2773 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2774 throw("findrunnable: negative nmspinning")
2775 }
2776 }
2777
2778
2779 for id, _p_ := range allpSnapshot {
2780 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) {
2781 lock(&sched.lock)
2782 _p_ = pidleget()
2783 unlock(&sched.lock)
2784 if _p_ != nil {
2785 acquirep(_p_)
2786 if wasSpinning {
2787 _g_.m.spinning = true
2788 atomic.Xadd(&sched.nmspinning, 1)
2789 }
2790 goto top
2791 }
2792 break
2793 }
2794 }
2795
2796
2797
2798
2799
2800 for id, _p_ := range allpSnapshot {
2801 if timerpMaskSnapshot.read(uint32(id)) {
2802 w := nobarrierWakeTime(_p_)
2803 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2804 pollUntil = w
2805 }
2806 }
2807 }
2808 if pollUntil != 0 {
2809 if now == 0 {
2810 now = nanotime()
2811 }
2812 delta = pollUntil - now
2813 if delta < 0 {
2814 delta = 0
2815 }
2816 }
2817
2818
2819
2820
2821
2822 if atomic.Load(&gcBlackenEnabled) != 0 && gcMarkWorkAvailable(nil) {
2823
2824
2825
2826
2827
2828
2829
2830 lock(&sched.lock)
2831 var node *gcBgMarkWorkerNode
2832 _p_ = pidleget()
2833 if _p_ != nil {
2834
2835
2836 if gcBlackenEnabled != 0 {
2837 node = (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2838 if node == nil {
2839 pidleput(_p_)
2840 _p_ = nil
2841 }
2842 } else {
2843 pidleput(_p_)
2844 _p_ = nil
2845 }
2846 }
2847 unlock(&sched.lock)
2848 if _p_ != nil {
2849 acquirep(_p_)
2850 if wasSpinning {
2851 _g_.m.spinning = true
2852 atomic.Xadd(&sched.nmspinning, 1)
2853 }
2854
2855
2856 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2857 gp := node.gp.ptr()
2858 casgstatus(gp, _Gwaiting, _Grunnable)
2859 if trace.enabled {
2860 traceGoUnpark(gp, 0)
2861 }
2862 return gp, false
2863 }
2864 }
2865
2866
2867 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2868 atomic.Store64(&sched.pollUntil, uint64(pollUntil))
2869 if _g_.m.p != 0 {
2870 throw("findrunnable: netpoll with p")
2871 }
2872 if _g_.m.spinning {
2873 throw("findrunnable: netpoll with spinning")
2874 }
2875 if faketime != 0 {
2876
2877 delta = 0
2878 }
2879 list := netpoll(delta)
2880 atomic.Store64(&sched.pollUntil, 0)
2881 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2882 if faketime != 0 && list.empty() {
2883
2884
2885 stopm()
2886 goto top
2887 }
2888 lock(&sched.lock)
2889 _p_ = pidleget()
2890 unlock(&sched.lock)
2891 if _p_ == nil {
2892 injectglist(&list)
2893 } else {
2894 acquirep(_p_)
2895 if !list.empty() {
2896 gp := list.pop()
2897 injectglist(&list)
2898 casgstatus(gp, _Gwaiting, _Grunnable)
2899 if trace.enabled {
2900 traceGoUnpark(gp, 0)
2901 }
2902 return gp, false
2903 }
2904 if wasSpinning {
2905 _g_.m.spinning = true
2906 atomic.Xadd(&sched.nmspinning, 1)
2907 }
2908 goto top
2909 }
2910 } else if pollUntil != 0 && netpollinited() {
2911 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2912 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
2913 netpollBreak()
2914 }
2915 }
2916 stopm()
2917 goto top
2918 }
2919
2920
2921
2922
2923
2924 func pollWork() bool {
2925 if sched.runqsize != 0 {
2926 return true
2927 }
2928 p := getg().m.p.ptr()
2929 if !runqempty(p) {
2930 return true
2931 }
2932 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2933 if list := netpoll(0); !list.empty() {
2934 injectglist(&list)
2935 return true
2936 }
2937 }
2938 return false
2939 }
2940
2941
2942
2943
2944 func wakeNetPoller(when int64) {
2945 if atomic.Load64(&sched.lastpoll) == 0 {
2946
2947
2948
2949
2950 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2951 if pollerPollUntil == 0 || pollerPollUntil > when {
2952 netpollBreak()
2953 }
2954 } else {
2955
2956
2957 if GOOS != "plan9" {
2958 wakep()
2959 }
2960 }
2961 }
2962
2963 func resetspinning() {
2964 _g_ := getg()
2965 if !_g_.m.spinning {
2966 throw("resetspinning: not a spinning m")
2967 }
2968 _g_.m.spinning = false
2969 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2970 if int32(nmspinning) < 0 {
2971 throw("findrunnable: negative nmspinning")
2972 }
2973
2974
2975
2976 wakep()
2977 }
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987 func injectglist(glist *gList) {
2988 if glist.empty() {
2989 return
2990 }
2991 if trace.enabled {
2992 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
2993 traceGoUnpark(gp, 0)
2994 }
2995 }
2996
2997
2998
2999 head := glist.head.ptr()
3000 var tail *g
3001 qsize := 0
3002 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3003 tail = gp
3004 qsize++
3005 casgstatus(gp, _Gwaiting, _Grunnable)
3006 }
3007
3008
3009 var q gQueue
3010 q.head.set(head)
3011 q.tail.set(tail)
3012 *glist = gList{}
3013
3014 startIdle := func(n int) {
3015 for ; n != 0 && sched.npidle != 0; n-- {
3016 startm(nil, false)
3017 }
3018 }
3019
3020 pp := getg().m.p.ptr()
3021 if pp == nil {
3022 lock(&sched.lock)
3023 globrunqputbatch(&q, int32(qsize))
3024 unlock(&sched.lock)
3025 startIdle(qsize)
3026 return
3027 }
3028
3029 npidle := int(atomic.Load(&sched.npidle))
3030 var globq gQueue
3031 var n int
3032 for n = 0; n < npidle && !q.empty(); n++ {
3033 g := q.pop()
3034 globq.pushBack(g)
3035 }
3036 if n > 0 {
3037 lock(&sched.lock)
3038 globrunqputbatch(&globq, int32(n))
3039 unlock(&sched.lock)
3040 startIdle(n)
3041 qsize -= n
3042 }
3043
3044 if !q.empty() {
3045 runqputbatch(pp, &q, qsize)
3046 }
3047 }
3048
3049
3050
3051 func schedule() {
3052 _g_ := getg()
3053
3054 if _g_.m.locks != 0 {
3055 throw("schedule: holding locks")
3056 }
3057
3058 if _g_.m.lockedg != 0 {
3059 stoplockedm()
3060 execute(_g_.m.lockedg.ptr(), false)
3061 }
3062
3063
3064
3065 if _g_.m.incgo {
3066 throw("schedule: in cgo")
3067 }
3068
3069 top:
3070 pp := _g_.m.p.ptr()
3071 pp.preempt = false
3072
3073 if sched.gcwaiting != 0 {
3074 gcstopm()
3075 goto top
3076 }
3077 if pp.runSafePointFn != 0 {
3078 runSafePointFn()
3079 }
3080
3081
3082
3083
3084 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3085 throw("schedule: spinning with local work")
3086 }
3087
3088 checkTimers(pp, 0)
3089
3090 var gp *g
3091 var inheritTime bool
3092
3093
3094
3095
3096 tryWakeP := false
3097 if trace.enabled || trace.shutdown {
3098 gp = traceReader()
3099 if gp != nil {
3100 casgstatus(gp, _Gwaiting, _Grunnable)
3101 traceGoUnpark(gp, 0)
3102 tryWakeP = true
3103 }
3104 }
3105 if gp == nil && gcBlackenEnabled != 0 {
3106 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
3107 tryWakeP = tryWakeP || gp != nil
3108 }
3109 if gp == nil {
3110
3111
3112
3113 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
3114 lock(&sched.lock)
3115 gp = globrunqget(_g_.m.p.ptr(), 1)
3116 unlock(&sched.lock)
3117 }
3118 }
3119 if gp == nil {
3120 gp, inheritTime = runqget(_g_.m.p.ptr())
3121
3122
3123 }
3124 if gp == nil {
3125 gp, inheritTime = findrunnable()
3126 }
3127
3128
3129
3130
3131 if _g_.m.spinning {
3132 resetspinning()
3133 }
3134
3135 if sched.disable.user && !schedEnabled(gp) {
3136
3137
3138
3139 lock(&sched.lock)
3140 if schedEnabled(gp) {
3141
3142
3143 unlock(&sched.lock)
3144 } else {
3145 sched.disable.runnable.pushBack(gp)
3146 sched.disable.n++
3147 unlock(&sched.lock)
3148 goto top
3149 }
3150 }
3151
3152
3153
3154 if tryWakeP {
3155 wakep()
3156 }
3157 if gp.lockedm != 0 {
3158
3159
3160 startlockedm(gp)
3161 goto top
3162 }
3163
3164 execute(gp, inheritTime)
3165 }
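Inside schedule, once every 61 ticks of schedtick the global run queue is checked before the P's local queue; otherwise two goroutines that keep respawning each other locally could starve everything queued globally. A toy sketch of that fairness rule (hypothetical queues, not the runtime's data structures):

package main

import "fmt"

type queues struct {
	tick   uint64
	local  []string
	global []string
}

// next prefers the local queue but, every 61st tick, takes from the global
// queue first so that globally queued work cannot be starved indefinitely.
func (q *queues) next() (string, bool) {
	q.tick++
	if q.tick%61 == 0 && len(q.global) > 0 {
		job := q.global[0]
		q.global = q.global[1:]
		return job, true
	}
	if len(q.local) > 0 {
		job := q.local[0]
		q.local = q.local[1:]
		return job, true
	}
	if len(q.global) > 0 {
		job := q.global[0]
		q.global = q.global[1:]
		return job, true
	}
	return "", false
}

func main() {
	q := &queues{local: []string{"a", "b"}, global: []string{"g"}}
	for {
		job, ok := q.next()
		if !ok {
			break
		}
		fmt.Println(job)
	}
}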
3166
3167
3168
3169
3170
3171
3172
3173
3174 func dropg() {
3175 _g_ := getg()
3176
3177 setMNoWB(&_g_.m.curg.m, nil)
3178 setGNoWB(&_g_.m.curg, nil)
3179 }
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3191
3192
3193 next := int64(atomic.Load64(&pp.timer0When))
3194 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
3195 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3196 next = nextAdj
3197 }
3198
3199 if next == 0 {
3200
3201 return now, 0, false
3202 }
3203
3204 if now == 0 {
3205 now = nanotime()
3206 }
3207 if now < next {
3208
3209
3210
3211
3212 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
3213 return now, next, false
3214 }
3215 }
3216
3217 lock(&pp.timersLock)
3218
3219 if len(pp.timers) > 0 {
3220 adjusttimers(pp, now)
3221 for len(pp.timers) > 0 {
3222
3223
3224 if tw := runtimer(pp, now); tw != 0 {
3225 if tw > 0 {
3226 pollUntil = tw
3227 }
3228 break
3229 }
3230 ran = true
3231 }
3232 }
3233
3234
3235
3236
3237 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
3238 clearDeletedTimers(pp)
3239 }
3240
3241 unlock(&pp.timersLock)
3242
3243 return now, pollUntil, ran
3244 }
3245
3246 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3247 unlock((*mutex)(lock))
3248 return true
3249 }
3250
3251
3252 func park_m(gp *g) {
3253 _g_ := getg()
3254
3255 if trace.enabled {
3256 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
3257 }
3258
3259 casgstatus(gp, _Grunning, _Gwaiting)
3260 dropg()
3261
3262 if fn := _g_.m.waitunlockf; fn != nil {
3263 ok := fn(gp, _g_.m.waitlock)
3264 _g_.m.waitunlockf = nil
3265 _g_.m.waitlock = nil
3266 if !ok {
3267 if trace.enabled {
3268 traceGoUnpark(gp, 2)
3269 }
3270 casgstatus(gp, _Gwaiting, _Grunnable)
3271 execute(gp, true)
3272 }
3273 }
3274 schedule()
3275 }
3276
3277 func goschedImpl(gp *g) {
3278 status := readgstatus(gp)
3279 if status&^_Gscan != _Grunning {
3280 dumpgstatus(gp)
3281 throw("bad g status")
3282 }
3283 casgstatus(gp, _Grunning, _Grunnable)
3284 dropg()
3285 lock(&sched.lock)
3286 globrunqput(gp)
3287 unlock(&sched.lock)
3288
3289 schedule()
3290 }
3291
3292
3293 func gosched_m(gp *g) {
3294 if trace.enabled {
3295 traceGoSched()
3296 }
3297 goschedImpl(gp)
3298 }
3299
3300
3301 func goschedguarded_m(gp *g) {
3302
3303 if !canPreemptM(gp.m) {
3304 gogo(&gp.sched)
3305 }
3306
3307 if trace.enabled {
3308 traceGoSched()
3309 }
3310 goschedImpl(gp)
3311 }
3312
3313 func gopreempt_m(gp *g) {
3314 if trace.enabled {
3315 traceGoPreempt()
3316 }
3317 goschedImpl(gp)
3318 }
3319
3320
3321
3322
3323 func preemptPark(gp *g) {
3324 if trace.enabled {
3325 traceGoPark(traceEvGoBlock, 0)
3326 }
3327 status := readgstatus(gp)
3328 if status&^_Gscan != _Grunning {
3329 dumpgstatus(gp)
3330 throw("bad g status")
3331 }
3332 gp.waitreason = waitReasonPreempted
3333
3334
3335
3336
3337
3338
3339 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3340 dropg()
3341 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3342 schedule()
3343 }
3344
3345
3346
3347
3348 func goyield() {
3349 checkTimeouts()
3350 mcall(goyield_m)
3351 }
3352
3353 func goyield_m(gp *g) {
3354 if trace.enabled {
3355 traceGoPreempt()
3356 }
3357 pp := gp.m.p.ptr()
3358 casgstatus(gp, _Grunning, _Grunnable)
3359 dropg()
3360 runqput(pp, gp, false)
3361 schedule()
3362 }
3363
3364
3365 func goexit1() {
3366 if raceenabled {
3367 racegoend()
3368 }
3369 if trace.enabled {
3370 traceGoEnd()
3371 }
3372 mcall(goexit0)
3373 }
3374
3375
3376 func goexit0(gp *g) {
3377 _g_ := getg()
3378
3379 casgstatus(gp, _Grunning, _Gdead)
3380 if isSystemGoroutine(gp, false) {
3381 atomic.Xadd(&sched.ngsys, -1)
3382 }
3383 gp.m = nil
3384 locked := gp.lockedm != 0
3385 gp.lockedm = 0
3386 _g_.m.lockedg = 0
3387 gp.preemptStop = false
3388 gp.paniconfault = false
3389 gp._defer = nil
3390 gp._panic = nil
3391 gp.writebuf = nil
3392 gp.waitreason = 0
3393 gp.param = nil
3394 gp.labels = nil
3395 gp.timer = nil
3396
3397 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3398
3399
3400
3401 assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
3402 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3403 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
3404 gp.gcAssistBytes = 0
3405 }
3406
3407 dropg()
3408
3409 if GOARCH == "wasm" {
3410 gfput(_g_.m.p.ptr(), gp)
3411 schedule()
3412 }
3413
3414 if _g_.m.lockedInt != 0 {
3415 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
3416 throw("internal lockOSThread error")
3417 }
3418 gfput(_g_.m.p.ptr(), gp)
3419 if locked {
3420
3421
3422
3423
3424
3425
3426 if GOOS != "plan9" {
3427 gogo(&_g_.m.g0.sched)
3428 } else {
3429
3430
3431 _g_.m.lockedExt = 0
3432 }
3433 }
3434 schedule()
3435 }
3436
3437 // save updates getg().sched to refer to pc and sp so that a following
3438 // gogo will restore pc and sp.
3439 //
3440 // save must not have write barriers because invoking a write barrier
3441 // can clobber getg().sched.
3442
3443
3444
3445 func save(pc, sp uintptr) {
3446 _g_ := getg()
3447
3448 _g_.sched.pc = pc
3449 _g_.sched.sp = sp
3450 _g_.sched.lr = 0
3451 _g_.sched.ret = 0
3452 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
3453
3454
3455
3456 if _g_.sched.ctxt != nil {
3457 badctxt()
3458 }
3459 }
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490 // The goroutine g is about to enter a system call.
3491 // Record that it's not using the cpu anymore.
3492 // This is called only from the go syscall library and cgocall,
3493 // not from the low-level system calls used by the runtime.
3494 //
3495 // Entersyscall cannot split the stack: the save must
3496 // make g->sched refer to the caller's stack segment, because
3497 // entersyscall is going to return immediately after.
3498 func reentersyscall(pc, sp uintptr) {
3499 _g_ := getg()
3500
3501
3502
3503 _g_.m.locks++
3504
3505
3506
3507
3508
3509 _g_.stackguard0 = stackPreempt
3510 _g_.throwsplit = true
3511
3512
3513 save(pc, sp)
3514 _g_.syscallsp = sp
3515 _g_.syscallpc = pc
3516 casgstatus(_g_, _Grunning, _Gsyscall)
3517 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3518 systemstack(func() {
3519 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3520 throw("entersyscall")
3521 })
3522 }
3523
3524 if trace.enabled {
3525 systemstack(traceGoSysCall)
3526
3527
3528
3529 save(pc, sp)
3530 }
3531
3532 if atomic.Load(&sched.sysmonwait) != 0 {
3533 systemstack(entersyscall_sysmon)
3534 save(pc, sp)
3535 }
3536
3537 if _g_.m.p.ptr().runSafePointFn != 0 {
3538
3539 systemstack(runSafePointFn)
3540 save(pc, sp)
3541 }
3542
3543 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3544 _g_.sysblocktraced = true
3545 pp := _g_.m.p.ptr()
3546 pp.m = 0
3547 _g_.m.oldp.set(pp)
3548 _g_.m.p = 0
3549 atomic.Store(&pp.status, _Psyscall)
3550 if sched.gcwaiting != 0 {
3551 systemstack(entersyscall_gcwait)
3552 save(pc, sp)
3553 }
3554
3555 _g_.m.locks--
3556 }
3557
3558 // Standard syscall entry used by the go syscall library and normal cgo calls.
3559 //
3560 // This is exported via linkname to assembly in the syscall package.
3561
3562
3563
3564 func entersyscall() {
3565 reentersyscall(getcallerpc(), getcallersp())
3566 }
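// As a rough, user-level illustration (not part of this file): any blocking
// call made through the syscall package funnels into entersyscall/exitsyscall,
// which is what lets sysmon retake the P if the call blocks. A minimal sketch,
// assuming a Unix system:
//
//	package main
//
//	import (
//		"os"
//		"syscall"
//	)
//
//	func main() {
//		r, w, _ := os.Pipe()
//		go func() { w.Write([]byte{1}) }()
//		var buf [1]byte
//		// Read blocks in the kernel until the write above lands; while it
//		// blocks, this goroutine sits in _Gsyscall and its P can be handed
//		// to another M by retake/handoffp.
//		syscall.Read(int(r.Fd()), buf[:])
//	}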
3567
3568 func entersyscall_sysmon() {
3569 lock(&sched.lock)
3570 if atomic.Load(&sched.sysmonwait) != 0 {
3571 atomic.Store(&sched.sysmonwait, 0)
3572 notewakeup(&sched.sysmonnote)
3573 }
3574 unlock(&sched.lock)
3575 }
3576
3577 func entersyscall_gcwait() {
3578 _g_ := getg()
3579 _p_ := _g_.m.oldp.ptr()
3580
3581 lock(&sched.lock)
3582 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
3583 if trace.enabled {
3584 traceGoSysBlock(_p_)
3585 traceProcStop(_p_)
3586 }
3587 _p_.syscalltick++
3588 if sched.stopwait--; sched.stopwait == 0 {
3589 notewakeup(&sched.stopnote)
3590 }
3591 }
3592 unlock(&sched.lock)
3593 }
3594
3595 // The same as entersyscall(), but with a hint that the syscall is blocking.
3596
3597 func entersyscallblock() {
3598 _g_ := getg()
3599
3600 _g_.m.locks++
3601 _g_.throwsplit = true
3602 _g_.stackguard0 = stackPreempt
3603 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3604 _g_.sysblocktraced = true
3605 _g_.m.p.ptr().syscalltick++
3606
3607
3608 pc := getcallerpc()
3609 sp := getcallersp()
3610 save(pc, sp)
3611 _g_.syscallsp = _g_.sched.sp
3612 _g_.syscallpc = _g_.sched.pc
3613 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3614 sp1 := sp
3615 sp2 := _g_.sched.sp
3616 sp3 := _g_.syscallsp
3617 systemstack(func() {
3618 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3619 throw("entersyscallblock")
3620 })
3621 }
3622 casgstatus(_g_, _Grunning, _Gsyscall)
3623 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3624 systemstack(func() {
3625 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3626 throw("entersyscallblock")
3627 })
3628 }
3629
3630 systemstack(entersyscallblock_handoff)
3631
3632
3633 save(getcallerpc(), getcallersp())
3634
3635 _g_.m.locks--
3636 }
3637
3638 func entersyscallblock_handoff() {
3639 if trace.enabled {
3640 traceGoSysCall()
3641 traceGoSysBlock(getg().m.p.ptr())
3642 }
3643 handoffp(releasep())
3644 }
3645
3646 // The goroutine g exited its system call.
3647 // Arrange for it to run on a cpu again.
3648 // This is called only from the go syscall library, not
3649 // from the low-level system calls used by the runtime.
3650 //
3651 // Write barriers are not allowed because our P may have been stolen.
3652 //
3653 // This is exported via linkname to assembly in the syscall package.
3654 //
3655
3656
3657
3658 func exitsyscall() {
3659 _g_ := getg()
3660
3661 _g_.m.locks++
3662 if getcallersp() > _g_.syscallsp {
3663 throw("exitsyscall: syscall frame is no longer valid")
3664 }
3665
3666 _g_.waitsince = 0
3667 oldp := _g_.m.oldp.ptr()
3668 _g_.m.oldp = 0
3669 if exitsyscallfast(oldp) {
3670 if trace.enabled {
3671 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3672 systemstack(traceGoStart)
3673 }
3674 }
3675
3676 _g_.m.p.ptr().syscalltick++
3677
3678 casgstatus(_g_, _Gsyscall, _Grunning)
3679
3680
3681
3682 _g_.syscallsp = 0
3683 _g_.m.locks--
3684 if _g_.preempt {
3685
3686 _g_.stackguard0 = stackPreempt
3687 } else {
3688
3689 _g_.stackguard0 = _g_.stack.lo + _StackGuard
3690 }
3691 _g_.throwsplit = false
3692
3693 if sched.disable.user && !schedEnabled(_g_) {
3694
3695 Gosched()
3696 }
3697
3698 return
3699 }
3700
3701 _g_.sysexitticks = 0
3702 if trace.enabled {
3703
3704
3705 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3706 osyield()
3707 }
3708
3709
3710
3711
3712 _g_.sysexitticks = cputicks()
3713 }
3714
3715 _g_.m.locks--
3716
3717
3718 mcall(exitsyscall0)
3719
3720
3721
3722
3723
3724
3725
3726 _g_.syscallsp = 0
3727 _g_.m.p.ptr().syscalltick++
3728 _g_.throwsplit = false
3729 }
3730
3731
3732 func exitsyscallfast(oldp *p) bool {
3733 _g_ := getg()
3734
3735
3736 if sched.stopwait == freezeStopWait {
3737 return false
3738 }
3739
3740
3741 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
3742
3743 wirep(oldp)
3744 exitsyscallfast_reacquired()
3745 return true
3746 }
3747
3748
3749 if sched.pidle != 0 {
3750 var ok bool
3751 systemstack(func() {
3752 ok = exitsyscallfast_pidle()
3753 if ok && trace.enabled {
3754 if oldp != nil {
3755
3756
3757 for oldp.syscalltick == _g_.m.syscalltick {
3758 osyield()
3759 }
3760 }
3761 traceGoSysExit(0)
3762 }
3763 })
3764 if ok {
3765 return true
3766 }
3767 }
3768 return false
3769 }
3770
3771 // exitsyscallfast_reacquired is the exitsyscall path on which this G
3772 // has successfully reacquired the P it was running on before the
3773 // syscall.
3774
3775
3776 func exitsyscallfast_reacquired() {
3777 _g_ := getg()
3778 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3779 if trace.enabled {
3780
3781
3782
3783 systemstack(func() {
3784
3785 traceGoSysBlock(_g_.m.p.ptr())
3786
3787 traceGoSysExit(0)
3788 })
3789 }
3790 _g_.m.p.ptr().syscalltick++
3791 }
3792 }
3793
3794 func exitsyscallfast_pidle() bool {
3795 lock(&sched.lock)
3796 _p_ := pidleget()
3797 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3798 atomic.Store(&sched.sysmonwait, 0)
3799 notewakeup(&sched.sysmonnote)
3800 }
3801 unlock(&sched.lock)
3802 if _p_ != nil {
3803 acquirep(_p_)
3804 return true
3805 }
3806 return false
3807 }
3808
3809
3810
3811
3812
3813 func exitsyscall0(gp *g) {
3814 _g_ := getg()
3815
3816 casgstatus(gp, _Gsyscall, _Grunnable)
3817 dropg()
3818 lock(&sched.lock)
3819 var _p_ *p
3820 if schedEnabled(_g_) {
3821 _p_ = pidleget()
3822 }
3823 if _p_ == nil {
3824 globrunqput(gp)
3825 } else if atomic.Load(&sched.sysmonwait) != 0 {
3826 atomic.Store(&sched.sysmonwait, 0)
3827 notewakeup(&sched.sysmonnote)
3828 }
3829 unlock(&sched.lock)
3830 if _p_ != nil {
3831 acquirep(_p_)
3832 execute(gp, false)
3833 }
3834 if _g_.m.lockedg != 0 {
3835
3836 stoplockedm()
3837 execute(gp, false)
3838 }
3839 stopm()
3840 schedule()
3841 }
3842
3843 func beforefork() {
3844 gp := getg().m.curg
3845
3846
3847
3848
3849 gp.m.locks++
3850 sigsave(&gp.m.sigmask)
3851 sigblock(false)
3852
3853
3854
3855
3856
3857 gp.stackguard0 = stackFork
3858 }
3859
3860 // Called from syscall package before fork.
3861
3862
3863 func syscall_runtime_BeforeFork() {
3864 systemstack(beforefork)
3865 }
3866
3867 func afterfork() {
3868 gp := getg().m.curg
3869
3870
3871 gp.stackguard0 = gp.stack.lo + _StackGuard
3872
3873 msigrestore(gp.m.sigmask)
3874
3875 gp.m.locks--
3876 }
3877
3878 // Called from syscall package after fork in parent.
3879
3880
3881 func syscall_runtime_AfterFork() {
3882 systemstack(afterfork)
3883 }
3884
3885
3886
3887 var inForkedChild bool
3888
3889 // Called from syscall package after fork in child.
3890 // It resets non-sigignored signals to the default handler, and
3891 // restores the signal mask in preparation for the exec.
3892 //
3893 // Because this might be called during a vfork, and therefore may be
3894 // temporarily sharing address space with the parent process, this must
3895 // not change any global variables or call into C code that may do so.
3896
3897
3898
3899
3900 func syscall_runtime_AfterForkInChild() {
3901
3902
3903
3904
3905 inForkedChild = true
3906
3907 clearSignalHandlers()
3908
3909
3910
3911 msigrestore(getg().m.sigmask)
3912
3913 inForkedChild = false
3914 }
3915
3916
3917
3918
3919 var pendingPreemptSignals uint32
3920
3921 // Called from syscall package before Exec.
3922
3923 func syscall_runtime_BeforeExec() {
3924
3925 execLock.lock()
3926
3927
3928
3929 if GOOS == "darwin" || GOOS == "ios" {
3930 for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
3931 osyield()
3932 }
3933 }
3934 }
3935
3936 // Called from syscall package after Exec.
3937
3938 func syscall_runtime_AfterExec() {
3939 execLock.unlock()
3940 }
3941
3942 // Allocate a new g, with a stack big enough for stacksize bytes.
3943 func malg(stacksize int32) *g {
3944 newg := new(g)
3945 if stacksize >= 0 {
3946 stacksize = round2(_StackSystem + stacksize)
3947 systemstack(func() {
3948 newg.stack = stackalloc(uint32(stacksize))
3949 })
3950 newg.stackguard0 = newg.stack.lo + _StackGuard
3951 newg.stackguard1 = ^uintptr(0)
3952
3953
3954 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
3955 }
3956 return newg
3957 }
3958
3959 // Create a new g running fn with siz bytes of arguments.
3960 // Put it on the queue of g's waiting to run.
3961 // The compiler turns a go statement into a call to this.
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974 func newproc(siz int32, fn *funcval) {
3975 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
3976 gp := getg()
3977 pc := getcallerpc()
3978 systemstack(func() {
3979 newg := newproc1(fn, argp, siz, gp, pc)
3980
3981 _p_ := getg().m.p.ptr()
3982 runqput(_p_, newg, true)
3983
3984 if mainStarted {
3985 wakep()
3986 }
3987 })
3988 }
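// For orientation (illustrative, not part of this file): every go statement in
// user code compiles into a call to newproc. A tiny, self-contained example of
// the source-level form:
//
//	package main
//
//	func main() {
//		done := make(chan struct{})
//		// The compiler lowers the statement below to a runtime.newproc call
//		// that copies the argument (the channel) into the new goroutine's
//		// frame; newproc then runqputs the new g with next=true.
//		go func(c chan struct{}) { close(c) }(done)
//		<-done
//	}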
3989
3990 // Create a new g in state _Grunnable, starting at fn, with narg bytes
3991 // of arguments starting at argp. callerpc is the address of the go
3992 // statement that created this. The caller is responsible for adding
3993 // the new g to the scheduler.
3994 //
3995 // This must run on the system stack because it's the continuation of
3996 // newproc, which cannot split the stack.
3997
3998
3999 func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
4000 _g_ := getg()
4001
4002 if fn == nil {
4003 _g_.m.throwing = -1
4004 throw("go of nil func value")
4005 }
4006 acquirem()
4007 siz := narg
4008 siz = (siz + 7) &^ 7
4009
4010
4011
4012
4013
4014 if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
4015 throw("newproc: function arguments too large for new goroutine")
4016 }
4017
4018 _p_ := _g_.m.p.ptr()
4019 newg := gfget(_p_)
4020 if newg == nil {
4021 newg = malg(_StackMin)
4022 casgstatus(newg, _Gidle, _Gdead)
4023 allgadd(newg)
4024 }
4025 if newg.stack.hi == 0 {
4026 throw("newproc1: newg missing stack")
4027 }
4028
4029 if readgstatus(newg) != _Gdead {
4030 throw("newproc1: new g is not Gdead")
4031 }
4032
4033 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize
4034 totalSize += -totalSize & (sys.SpAlign - 1)
4035 sp := newg.stack.hi - totalSize
4036 spArg := sp
4037 if usesLR {
4038
4039 *(*uintptr)(unsafe.Pointer(sp)) = 0
4040 prepGoExitFrame(sp)
4041 spArg += sys.MinFrameSize
4042 }
4043 if narg > 0 {
4044 memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
4045
4046
4047
4048
4049
4050
4051 if writeBarrier.needed && !_g_.m.curg.gcscandone {
4052 f := findfunc(fn.fn)
4053 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
4054 if stkmap.nbit > 0 {
4055
4056 bv := stackmapdata(stkmap, 0)
4057 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
4058 }
4059 }
4060 }
4061
4062 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4063 newg.sched.sp = sp
4064 newg.stktopsp = sp
4065 newg.sched.pc = funcPC(goexit) + sys.PCQuantum
4066 newg.sched.g = guintptr(unsafe.Pointer(newg))
4067 gostartcallfn(&newg.sched, fn)
4068 newg.gopc = callerpc
4069 newg.ancestors = saveAncestors(callergp)
4070 newg.startpc = fn.fn
4071 if _g_.m.curg != nil {
4072 newg.labels = _g_.m.curg.labels
4073 }
4074 if isSystemGoroutine(newg, false) {
4075 atomic.Xadd(&sched.ngsys, +1)
4076 }
4077 casgstatus(newg, _Gdead, _Grunnable)
4078
4079 if _p_.goidcache == _p_.goidcacheend {
4080
4081
4082
4083 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
4084 _p_.goidcache -= _GoidCacheBatch - 1
4085 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
4086 }
4087 newg.goid = int64(_p_.goidcache)
4088 _p_.goidcache++
4089 if raceenabled {
4090 newg.racectx = racegostart(callerpc)
4091 }
4092 if trace.enabled {
4093 traceGoCreate(newg, newg.startpc)
4094 }
4095 releasem(_g_.m)
4096
4097 return newg
4098 }
4099
4100 // saveAncestors copies previous ancestors of the given caller g and
4101 // includes info for the current caller into a new set of tracebacks for
4102 // a g being created.
4103 func saveAncestors(callergp *g) *[]ancestorInfo {
4104
4105 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4106 return nil
4107 }
4108 var callerAncestors []ancestorInfo
4109 if callergp.ancestors != nil {
4110 callerAncestors = *callergp.ancestors
4111 }
4112 n := int32(len(callerAncestors)) + 1
4113 if n > debug.tracebackancestors {
4114 n = debug.tracebackancestors
4115 }
4116 ancestors := make([]ancestorInfo, n)
4117 copy(ancestors[1:], callerAncestors)
4118
4119 var pcs [_TracebackMaxFrames]uintptr
4120 npcs := gcallers(callergp, 0, pcs[:])
4121 ipcs := make([]uintptr, npcs)
4122 copy(ipcs, pcs[:])
4123 ancestors[0] = ancestorInfo{
4124 pcs: ipcs,
4125 goid: callergp.goid,
4126 gopc: callergp.gopc,
4127 }
4128
4129 ancestorsp := new([]ancestorInfo)
4130 *ancestorsp = ancestors
4131 return ancestorsp
4132 }
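// Note: ancestor tracebacks are only recorded when the tracebackancestors
// GODEBUG setting is positive, e.g. (illustrative invocation):
//
//	GODEBUG=tracebackancestors=5 ./prog
//
// Goroutine dumps then append the creating goroutines' stacks, up to the
// requested depth, to each goroutine's traceback.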
4133
4134 // Put on gfree list.
4135 // If local list is too long, transfer a batch to the global list.
4136 func gfput(_p_ *p, gp *g) {
4137 if readgstatus(gp) != _Gdead {
4138 throw("gfput: bad status (not Gdead)")
4139 }
4140
4141 stksize := gp.stack.hi - gp.stack.lo
4142
4143 if stksize != _FixedStack {
4144
4145 stackfree(gp.stack)
4146 gp.stack.lo = 0
4147 gp.stack.hi = 0
4148 gp.stackguard0 = 0
4149 }
4150
4151 _p_.gFree.push(gp)
4152 _p_.gFree.n++
4153 if _p_.gFree.n >= 64 {
4154 lock(&sched.gFree.lock)
4155 for _p_.gFree.n >= 32 {
4156 _p_.gFree.n--
4157 gp = _p_.gFree.pop()
4158 if gp.stack.lo == 0 {
4159 sched.gFree.noStack.push(gp)
4160 } else {
4161 sched.gFree.stack.push(gp)
4162 }
4163 sched.gFree.n++
4164 }
4165 unlock(&sched.gFree.lock)
4166 }
4167 }
4168
4169 // Get from gfree list.
4170 // If local list is empty, grab a batch from global list.
4171 func gfget(_p_ *p) *g {
4172 retry:
4173 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4174 lock(&sched.gFree.lock)
4175
4176 for _p_.gFree.n < 32 {
4177
4178 gp := sched.gFree.stack.pop()
4179 if gp == nil {
4180 gp = sched.gFree.noStack.pop()
4181 if gp == nil {
4182 break
4183 }
4184 }
4185 sched.gFree.n--
4186 _p_.gFree.push(gp)
4187 _p_.gFree.n++
4188 }
4189 unlock(&sched.gFree.lock)
4190 goto retry
4191 }
4192 gp := _p_.gFree.pop()
4193 if gp == nil {
4194 return nil
4195 }
4196 _p_.gFree.n--
4197 if gp.stack.lo == 0 {
4198
4199 systemstack(func() {
4200 gp.stack = stackalloc(_FixedStack)
4201 })
4202 gp.stackguard0 = gp.stack.lo + _StackGuard
4203 } else {
4204 if raceenabled {
4205 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4206 }
4207 if msanenabled {
4208 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4209 }
4210 }
4211 return gp
4212 }
4213
4214 // Purge all cached G's from gfree list to the global list.
4215 func gfpurge(_p_ *p) {
4216 lock(&sched.gFree.lock)
4217 for !_p_.gFree.empty() {
4218 gp := _p_.gFree.pop()
4219 _p_.gFree.n--
4220 if gp.stack.lo == 0 {
4221 sched.gFree.noStack.push(gp)
4222 } else {
4223 sched.gFree.stack.push(gp)
4224 }
4225 sched.gFree.n++
4226 }
4227 unlock(&sched.gFree.lock)
4228 }
4229
4230
4231 func Breakpoint() {
4232 breakpoint()
4233 }
4234
4235 // dolockOSThread is called by LockOSThread and lockOSThread below
4236 // after they modify m.lockedg and g.lockedm. Do not allow preemption
4237 // during this call, or else the m might be different here than in the caller.
4238
4239 func dolockOSThread() {
4240 if GOARCH == "wasm" {
4241 return
4242 }
4243 _g_ := getg()
4244 _g_.m.lockedg.set(_g_)
4245 _g_.lockedm.set(_g_.m)
4246 }
4247
4248
4249
4250 // LockOSThread wires the calling goroutine to its current operating system thread.
4251 // The calling goroutine will always execute in that thread,
4252 // and no other goroutine will execute in it,
4253 // until the calling goroutine has made as many calls to
4254 // UnlockOSThread as to LockOSThread.
4255 // If the calling goroutine exits without unlocking the thread,
4256 // the thread will be terminated.
4257 //
4258 // All init functions are run on the startup thread. Calling LockOSThread
4259 // from an init function will cause the main function to be invoked on
4260 // that thread.
4261 //
4262 // A goroutine should call LockOSThread before calling OS services or
4263 // non-Go library functions that depend on per-thread state.
4264 func LockOSThread() {
4265 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4266
4267
4268
4269 startTemplateThread()
4270 }
4271 _g_ := getg()
4272 _g_.m.lockedExt++
4273 if _g_.m.lockedExt == 0 {
4274 _g_.m.lockedExt--
4275 panic("LockOSThread nesting overflow")
4276 }
4277 dolockOSThread()
4278 }
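// A minimal usage sketch (illustrative, not part of this file), e.g. for
// calling into a C library that keeps per-thread state:
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			runtime.LockOSThread()
//			defer runtime.UnlockOSThread()
//			// ... thread-affine work: per-thread C state, OS thread-local
//			// settings, etc. ...
//			close(done)
//		}()
//		<-done
//	}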
4279
4280
4281 func lockOSThread() {
4282 getg().m.lockedInt++
4283 dolockOSThread()
4284 }
4285
4286
4287
4288
4289
4290 func dounlockOSThread() {
4291 if GOARCH == "wasm" {
4292 return
4293 }
4294 _g_ := getg()
4295 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
4296 return
4297 }
4298 _g_.m.lockedg = 0
4299 _g_.lockedm = 0
4300 }
4301
4302
4303
4304 // UnlockOSThread undoes an earlier call to LockOSThread.
4305 // If this drops the number of active LockOSThread calls on the
4306 // calling goroutine to zero, it unwires the calling goroutine from
4307 // its fixed operating system thread.
4308 // If there are no active LockOSThread calls, this is a no-op.
4309 //
4310 // Before calling UnlockOSThread, the caller must ensure that the OS
4311 // thread is suitable for running other goroutines. If the caller made
4312 // any permanent changes to the state of the thread that would affect
4313 // other goroutines, it should not call this function and thus leave
4314 // the goroutine locked to the OS thread until the goroutine (and
4315 // hence the thread) exits.
4316 func UnlockOSThread() {
4317 _g_ := getg()
4318 if _g_.m.lockedExt == 0 {
4319 return
4320 }
4321 _g_.m.lockedExt--
4322 dounlockOSThread()
4323 }
4324
4325
4326 func unlockOSThread() {
4327 _g_ := getg()
4328 if _g_.m.lockedInt == 0 {
4329 systemstack(badunlockosthread)
4330 }
4331 _g_.m.lockedInt--
4332 dounlockOSThread()
4333 }
4334
4335 func badunlockosthread() {
4336 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4337 }
4338
4339 func gcount() int32 {
4340 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
4341 for _, _p_ := range allp {
4342 n -= _p_.gFree.n
4343 }
4344
4345
4346
4347 if n < 1 {
4348 n = 1
4349 }
4350 return n
4351 }
4352
4353 func mcount() int32 {
4354 return int32(sched.mnext - sched.nmfreed)
4355 }
4356
4357 var prof struct {
4358 signalLock uint32
4359 hz int32
4360 }
4361
4362 func _System() { _System() }
4363 func _ExternalCode() { _ExternalCode() }
4364 func _LostExternalCode() { _LostExternalCode() }
4365 func _GC() { _GC() }
4366 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4367 func _VDSO() { _VDSO() }
4368
4369 // Called if we receive a SIGPROF signal.
4370 // Called by the signal handler, may run during STW.
4371
4372 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4373 if prof.hz == 0 {
4374 return
4375 }
4376
4377
4378
4379
4380 if mp != nil && mp.profilehz == 0 {
4381 return
4382 }
4383
4384
4385
4386
4387
4388
4389
4390 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4391 if f := findfunc(pc); f.valid() {
4392 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4393 cpuprof.lostAtomic++
4394 return
4395 }
4396 }
4397 }
4398
4399
4400
4401
4402
4403
4404
4405 getg().m.mallocing++
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472 traceback := true
4473 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) {
4474 traceback = false
4475 }
4476 var stk [maxCPUProfStack]uintptr
4477 n := 0
4478 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4479 cgoOff := 0
4480
4481
4482
4483
4484
4485 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4486 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4487 cgoOff++
4488 }
4489 copy(stk[:], mp.cgoCallers[:cgoOff])
4490 mp.cgoCallers[0] = 0
4491 }
4492
4493
4494 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4495 if n > 0 {
4496 n += cgoOff
4497 }
4498 } else if traceback {
4499 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4500 }
4501
4502 if n <= 0 {
4503
4504
4505 n = 0
4506 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4507
4508
4509 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
4510 }
4511 if n == 0 && mp != nil && mp.vdsoSP != 0 {
4512 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4513 }
4514 if n == 0 {
4515
4516 n = 2
4517 if inVDSOPage(pc) {
4518 pc = funcPC(_VDSO) + sys.PCQuantum
4519 } else if pc > firstmoduledata.etext {
4520
4521 pc = funcPC(_ExternalCode) + sys.PCQuantum
4522 }
4523 stk[0] = pc
4524 if mp.preemptoff != "" {
4525 stk[1] = funcPC(_GC) + sys.PCQuantum
4526 } else {
4527 stk[1] = funcPC(_System) + sys.PCQuantum
4528 }
4529 }
4530 }
4531
4532 if prof.hz != 0 {
4533 cpuprof.add(gp, stk[:n])
4534 }
4535 getg().m.mallocing--
4536 }
4537
4538
4539
4540
4541 var sigprofCallers cgoCallers
4542 var sigprofCallersUse uint32
4543
4544
4545
4546
4547
4548
4549
4550 func sigprofNonGo() {
4551 if prof.hz != 0 {
4552 n := 0
4553 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
4554 n++
4555 }
4556 cpuprof.addNonGo(sigprofCallers[:n])
4557 }
4558
4559 atomic.Store(&sigprofCallersUse, 0)
4560 }
4561
4562
4563
4564
4565
4566
4567 func sigprofNonGoPC(pc uintptr) {
4568 if prof.hz != 0 {
4569 stk := []uintptr{
4570 pc,
4571 funcPC(_ExternalCode) + sys.PCQuantum,
4572 }
4573 cpuprof.addNonGo(stk)
4574 }
4575 }
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587 func setsSP(pc uintptr) bool {
4588 f := findfunc(pc)
4589 if !f.valid() {
4590
4591
4592 return true
4593 }
4594 switch f.funcID {
4595 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack:
4596 return true
4597 }
4598 return false
4599 }
4600
4601 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
4602 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
4603 func setcpuprofilerate(hz int32) {
4604
4605 if hz < 0 {
4606 hz = 0
4607 }
4608
4609
4610
4611 _g_ := getg()
4612 _g_.m.locks++
4613
4614
4615
4616
4617 setThreadCPUProfiler(0)
4618
4619 for !atomic.Cas(&prof.signalLock, 0, 1) {
4620 osyield()
4621 }
4622 if prof.hz != hz {
4623 setProcessCPUProfiler(hz)
4624 prof.hz = hz
4625 }
4626 atomic.Store(&prof.signalLock, 0)
4627
4628 lock(&sched.lock)
4629 sched.profilehz = hz
4630 unlock(&sched.lock)
4631
4632 if hz != 0 {
4633 setThreadCPUProfiler(hz)
4634 }
4635
4636 _g_.m.locks--
4637 }
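// setcpuprofilerate is reached from runtime/pprof when a CPU profile is
// started or stopped. An illustrative caller-side sketch (100 Hz is the
// runtime/pprof default rate):
//
//	package main
//
//	import (
//		"os"
//		"runtime/pprof"
//	)
//
//	func main() {
//		f, err := os.Create("cpu.out")
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//		pprof.StartCPUProfile(f)     // ends up in setcpuprofilerate(100)
//		defer pprof.StopCPUProfile() // ends up in setcpuprofilerate(0)
//		// ... workload to profile ...
//	}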
4638
4639
4640
4641 func (pp *p) init(id int32) {
4642 pp.id = id
4643 pp.status = _Pgcstop
4644 pp.sudogcache = pp.sudogbuf[:0]
4645 for i := range pp.deferpool {
4646 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4647 }
4648 pp.wbBuf.reset()
4649 if pp.mcache == nil {
4650 if id == 0 {
4651 if mcache0 == nil {
4652 throw("missing mcache?")
4653 }
4654
4655
4656 pp.mcache = mcache0
4657 } else {
4658 pp.mcache = allocmcache()
4659 }
4660 }
4661 if raceenabled && pp.raceprocctx == 0 {
4662 if id == 0 {
4663 pp.raceprocctx = raceprocctx0
4664 raceprocctx0 = 0
4665 } else {
4666 pp.raceprocctx = raceproccreate()
4667 }
4668 }
4669 lockInit(&pp.timersLock, lockRankTimers)
4670
4671
4672
4673 timerpMask.set(id)
4674
4675
4676 idlepMask.clear(id)
4677 }
4678
4679 // destroy releases all of the resources associated with pp and
4680 // transitions it to status _Pdead.
4681 //
4682 // sched.lock must be held and the world must be stopped.
4683 func (pp *p) destroy() {
4684 assertLockHeld(&sched.lock)
4685 assertWorldStopped()
4686
4687
4688 for pp.runqhead != pp.runqtail {
4689
4690 pp.runqtail--
4691 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4692
4693 globrunqputhead(gp)
4694 }
4695 if pp.runnext != 0 {
4696 globrunqputhead(pp.runnext.ptr())
4697 pp.runnext = 0
4698 }
4699 if len(pp.timers) > 0 {
4700 plocal := getg().m.p.ptr()
4701
4702
4703
4704
4705 lock(&plocal.timersLock)
4706 lock(&pp.timersLock)
4707 moveTimers(plocal, pp.timers)
4708 pp.timers = nil
4709 pp.numTimers = 0
4710 pp.adjustTimers = 0
4711 pp.deletedTimers = 0
4712 atomic.Store64(&pp.timer0When, 0)
4713 unlock(&pp.timersLock)
4714 unlock(&plocal.timersLock)
4715 }
4716
4717 if gcphase != _GCoff {
4718 wbBufFlush1(pp)
4719 pp.gcw.dispose()
4720 }
4721 for i := range pp.sudogbuf {
4722 pp.sudogbuf[i] = nil
4723 }
4724 pp.sudogcache = pp.sudogbuf[:0]
4725 for i := range pp.deferpool {
4726 for j := range pp.deferpoolbuf[i] {
4727 pp.deferpoolbuf[i][j] = nil
4728 }
4729 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4730 }
4731 systemstack(func() {
4732 for i := 0; i < pp.mspancache.len; i++ {
4733
4734 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4735 }
4736 pp.mspancache.len = 0
4737 lock(&mheap_.lock)
4738 pp.pcache.flush(&mheap_.pages)
4739 unlock(&mheap_.lock)
4740 })
4741 freemcache(pp.mcache)
4742 pp.mcache = nil
4743 gfpurge(pp)
4744 traceProcFree(pp)
4745 if raceenabled {
4746 if pp.timerRaceCtx != 0 {
4747
4748
4749
4750
4751
4752 mp := getg().m
4753 phold := mp.p.ptr()
4754 mp.p.set(pp)
4755
4756 racectxend(pp.timerRaceCtx)
4757 pp.timerRaceCtx = 0
4758
4759 mp.p.set(phold)
4760 }
4761 raceprocdestroy(pp.raceprocctx)
4762 pp.raceprocctx = 0
4763 }
4764 pp.gcAssistTime = 0
4765 pp.status = _Pdead
4766 }
4767
4768 // Change number of processors.
4769 //
4770 // sched.lock must be held, and the world must be stopped.
4771 //
4772 // gcworkbufs must not be being modified by either the GC or the write barrier
4773 // code, so the GC must not be running if the number of Ps actually changes.
4774 //
4775 // Returns list of Ps with local work, they need to be scheduled by the caller.
4776 func procresize(nprocs int32) *p {
4777 assertLockHeld(&sched.lock)
4778 assertWorldStopped()
4779
4780 old := gomaxprocs
4781 if old < 0 || nprocs <= 0 {
4782 throw("procresize: invalid arg")
4783 }
4784 if trace.enabled {
4785 traceGomaxprocs(nprocs)
4786 }
4787
4788
4789 now := nanotime()
4790 if sched.procresizetime != 0 {
4791 sched.totaltime += int64(old) * (now - sched.procresizetime)
4792 }
4793 sched.procresizetime = now
4794
4795 maskWords := (nprocs + 31) / 32
4796
4797
4798 if nprocs > int32(len(allp)) {
4799
4800
4801 lock(&allpLock)
4802 if nprocs <= int32(cap(allp)) {
4803 allp = allp[:nprocs]
4804 } else {
4805 nallp := make([]*p, nprocs)
4806
4807
4808 copy(nallp, allp[:cap(allp)])
4809 allp = nallp
4810 }
4811
4812 if maskWords <= int32(cap(idlepMask)) {
4813 idlepMask = idlepMask[:maskWords]
4814 timerpMask = timerpMask[:maskWords]
4815 } else {
4816 nidlepMask := make([]uint32, maskWords)
4817
4818 copy(nidlepMask, idlepMask)
4819 idlepMask = nidlepMask
4820
4821 ntimerpMask := make([]uint32, maskWords)
4822 copy(ntimerpMask, timerpMask)
4823 timerpMask = ntimerpMask
4824 }
4825 unlock(&allpLock)
4826 }
4827
4828
4829 for i := old; i < nprocs; i++ {
4830 pp := allp[i]
4831 if pp == nil {
4832 pp = new(p)
4833 }
4834 pp.init(i)
4835 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
4836 }
4837
4838 _g_ := getg()
4839 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
4840
4841 _g_.m.p.ptr().status = _Prunning
4842 _g_.m.p.ptr().mcache.prepareForSweep()
4843 } else {
4844
4845
4846
4847
4848
4849 if _g_.m.p != 0 {
4850 if trace.enabled {
4851
4852
4853
4854 traceGoSched()
4855 traceProcStop(_g_.m.p.ptr())
4856 }
4857 _g_.m.p.ptr().m = 0
4858 }
4859 _g_.m.p = 0
4860 p := allp[0]
4861 p.m = 0
4862 p.status = _Pidle
4863 acquirep(p)
4864 if trace.enabled {
4865 traceGoStart()
4866 }
4867 }
4868
4869
4870 mcache0 = nil
4871
4872
4873 for i := nprocs; i < old; i++ {
4874 p := allp[i]
4875 p.destroy()
4876
4877 }
4878
4879
4880 if int32(len(allp)) != nprocs {
4881 lock(&allpLock)
4882 allp = allp[:nprocs]
4883 idlepMask = idlepMask[:maskWords]
4884 timerpMask = timerpMask[:maskWords]
4885 unlock(&allpLock)
4886 }
4887
4888 var runnablePs *p
4889 for i := nprocs - 1; i >= 0; i-- {
4890 p := allp[i]
4891 if _g_.m.p.ptr() == p {
4892 continue
4893 }
4894 p.status = _Pidle
4895 if runqempty(p) {
4896 pidleput(p)
4897 } else {
4898 p.m.set(mget())
4899 p.link.set(runnablePs)
4900 runnablePs = p
4901 }
4902 }
4903 stealOrder.reset(uint32(nprocs))
4904 var int32p *int32 = &gomaxprocs
4905 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
4906 return runnablePs
4907 }
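// procresize runs with the world stopped whenever the number of Ps changes.
// From user code that happens through runtime.GOMAXPROCS, e.g. (illustrative):
//
//	old := runtime.GOMAXPROCS(4) // stop the world, procresize(4), restart
//	defer runtime.GOMAXPROCS(old)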
4908
4909
4910
4911
4912
4913
4914
4915 func acquirep(_p_ *p) {
4916
4917 wirep(_p_)
4918
4919
4920
4921
4922
4923 _p_.mcache.prepareForSweep()
4924
4925 if trace.enabled {
4926 traceProcStart()
4927 }
4928 }
4929
4930
4931
4932
4933
4934
4935
4936 func wirep(_p_ *p) {
4937 _g_ := getg()
4938
4939 if _g_.m.p != 0 {
4940 throw("wirep: already in go")
4941 }
4942 if _p_.m != 0 || _p_.status != _Pidle {
4943 id := int64(0)
4944 if _p_.m != 0 {
4945 id = _p_.m.ptr().id
4946 }
4947 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
4948 throw("wirep: invalid p state")
4949 }
4950 _g_.m.p.set(_p_)
4951 _p_.m.set(_g_.m)
4952 _p_.status = _Prunning
4953 }
4954
4955
4956 func releasep() *p {
4957 _g_ := getg()
4958
4959 if _g_.m.p == 0 {
4960 throw("releasep: invalid arg")
4961 }
4962 _p_ := _g_.m.p.ptr()
4963 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
4964 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
4965 throw("releasep: invalid p state")
4966 }
4967 if trace.enabled {
4968 traceProcStop(_g_.m.p.ptr())
4969 }
4970 _g_.m.p = 0
4971 _p_.m = 0
4972 _p_.status = _Pidle
4973 return _p_
4974 }
4975
4976 func incidlelocked(v int32) {
4977 lock(&sched.lock)
4978 sched.nmidlelocked += v
4979 if v > 0 {
4980 checkdead()
4981 }
4982 unlock(&sched.lock)
4983 }
4984
4985 // Check for deadlock situation.
4986 // The check is based on number of running M's, if 0 -> deadlock.
4987 // sched.lock must be held.
4988 func checkdead() {
4989 assertLockHeld(&sched.lock)
4990
4991
4992
4993
4994 if islibrary || isarchive {
4995 return
4996 }
4997
4998
4999
5000
5001
5002 if panicking > 0 {
5003 return
5004 }
5005
5006
5007
5008
5009
5010 var run0 int32
5011 if !iscgo && cgoHasExtraM {
5012 mp := lockextra(true)
5013 haveExtraM := extraMCount > 0
5014 unlockextra(mp)
5015 if haveExtraM {
5016 run0 = 1
5017 }
5018 }
5019
5020 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5021 if run > run0 {
5022 return
5023 }
5024 if run < 0 {
5025 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5026 throw("checkdead: inconsistent counts")
5027 }
5028
5029 grunning := 0
5030 lock(&allglock)
5031 for i := 0; i < len(allgs); i++ {
5032 gp := allgs[i]
5033 if isSystemGoroutine(gp, false) {
5034 continue
5035 }
5036 s := readgstatus(gp)
5037 switch s &^ _Gscan {
5038 case _Gwaiting,
5039 _Gpreempted:
5040 grunning++
5041 case _Grunnable,
5042 _Grunning,
5043 _Gsyscall:
5044 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5045 throw("checkdead: runnable g")
5046 }
5047 }
5048 unlock(&allglock)
5049 if grunning == 0 {
5050 unlock(&sched.lock)
5051 throw("no goroutines (main called runtime.Goexit) - deadlock!")
5052 }
5053
5054
5055 if faketime != 0 {
5056 when, _p_ := timeSleepUntil()
5057 if _p_ != nil {
5058 faketime = when
5059 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
5060 if (*pp).ptr() == _p_ {
5061 *pp = _p_.link
5062 break
5063 }
5064 }
5065 mp := mget()
5066 if mp == nil {
5067
5068
5069 throw("checkdead: no m for timer")
5070 }
5071 mp.nextp.set(_p_)
5072 notewakeup(&mp.park)
5073 return
5074 }
5075 }
5076
5077
5078 for _, _p_ := range allp {
5079 if len(_p_.timers) > 0 {
5080 return
5081 }
5082 }
5083
5084 getg().m.throwing = -1
5085 unlock(&sched.lock)
5086 throw("all goroutines are asleep - deadlock!")
5087 }
5088
5089 // forcegcperiod is the maximum time in nanoseconds between garbage
5090 // collections. If we go this long without a garbage collection, one
5091 // is forced to run.
5092 //
5093 // This is a variable for testing purposes. It normally doesn't change.
5094 var forcegcperiod int64 = 2 * 60 * 1e9
5095
5096
5097
5098
5099 func sysmon() {
5100 lock(&sched.lock)
5101 sched.nmsys++
5102 checkdead()
5103 unlock(&sched.lock)
5104
5105
5106
5107 atomic.Store(&sched.sysmonStarting, 0)
5108
5109 lasttrace := int64(0)
5110 idle := 0
5111 delay := uint32(0)
5112
5113 for {
5114 if idle == 0 {
5115 delay = 20
5116 } else if idle > 50 {
5117 delay *= 2
5118 }
5119 if delay > 10*1000 {
5120 delay = 10 * 1000
5121 }
5122 usleep(delay)
5123 mDoFixup()
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140 now := nanotime()
5141 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
5142 lock(&sched.lock)
5143 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
5144 syscallWake := false
5145 next, _ := timeSleepUntil()
5146 if next > now {
5147 atomic.Store(&sched.sysmonwait, 1)
5148 unlock(&sched.lock)
5149
5150
5151 sleep := forcegcperiod / 2
5152 if next-now < sleep {
5153 sleep = next - now
5154 }
5155 shouldRelax := sleep >= osRelaxMinNS
5156 if shouldRelax {
5157 osRelax(true)
5158 }
5159 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5160 mDoFixup()
5161 if shouldRelax {
5162 osRelax(false)
5163 }
5164 lock(&sched.lock)
5165 atomic.Store(&sched.sysmonwait, 0)
5166 noteclear(&sched.sysmonnote)
5167 }
5168 if syscallWake {
5169 idle = 0
5170 delay = 20
5171 }
5172 }
5173 unlock(&sched.lock)
5174 }
5175
5176 lock(&sched.sysmonlock)
5177
5178
5179 now = nanotime()
5180
5181
5182 if *cgo_yield != nil {
5183 asmcgocall(*cgo_yield, nil)
5184 }
5185
5186 lastpoll := int64(atomic.Load64(&sched.lastpoll))
5187 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5188 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
5189 list := netpoll(0)
5190 if !list.empty() {
5191
5192
5193
5194
5195
5196
5197
5198 incidlelocked(-1)
5199 injectglist(&list)
5200 incidlelocked(1)
5201 }
5202 }
5203 mDoFixup()
5204 if GOOS == "netbsd" {
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220 if next, _ := timeSleepUntil(); next < now {
5221 startm(nil, false)
5222 }
5223 }
5224 if atomic.Load(&scavenge.sysmonWake) != 0 {
5225
5226 wakeScavenger()
5227 }
5228
5229
5230 if retake(now) != 0 {
5231 idle = 0
5232 } else {
5233 idle++
5234 }
5235
5236 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
5237 lock(&forcegc.lock)
5238 forcegc.idle = 0
5239 var list gList
5240 list.push(forcegc.g)
5241 injectglist(&list)
5242 unlock(&forcegc.lock)
5243 }
5244 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5245 lasttrace = now
5246 schedtrace(debug.scheddetail > 0)
5247 }
5248 unlock(&sched.sysmonlock)
5249 }
5250 }
5251
5252 type sysmontick struct {
5253 schedtick uint32
5254 schedwhen int64
5255 syscalltick uint32
5256 syscallwhen int64
5257 }
5258
5259
5260 // forcePreemptNS is the time slice given to a goroutine before it is preempted.
5261 const forcePreemptNS = 10 * 1000 * 1000
5262
5263 func retake(now int64) uint32 {
5264 n := 0
5265
5266
5267 lock(&allpLock)
5268
5269
5270
5271 for i := 0; i < len(allp); i++ {
5272 _p_ := allp[i]
5273 if _p_ == nil {
5274
5275
5276 continue
5277 }
5278 pd := &_p_.sysmontick
5279 s := _p_.status
5280 sysretake := false
5281 if s == _Prunning || s == _Psyscall {
5282
5283 t := int64(_p_.schedtick)
5284 if int64(pd.schedtick) != t {
5285 pd.schedtick = uint32(t)
5286 pd.schedwhen = now
5287 } else if pd.schedwhen+forcePreemptNS <= now {
5288 preemptone(_p_)
5289
5290
5291 sysretake = true
5292 }
5293 }
5294 if s == _Psyscall {
5295
5296 t := int64(_p_.syscalltick)
5297 if !sysretake && int64(pd.syscalltick) != t {
5298 pd.syscalltick = uint32(t)
5299 pd.syscallwhen = now
5300 continue
5301 }
5302
5303
5304
5305 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
5306 continue
5307 }
5308
5309 unlock(&allpLock)
5310
5311
5312
5313
5314 incidlelocked(-1)
5315 if atomic.Cas(&_p_.status, s, _Pidle) {
5316 if trace.enabled {
5317 traceGoSysBlock(_p_)
5318 traceProcStop(_p_)
5319 }
5320 n++
5321 _p_.syscalltick++
5322 handoffp(_p_)
5323 }
5324 incidlelocked(1)
5325 lock(&allpLock)
5326 }
5327 }
5328 unlock(&allpLock)
5329 return uint32(n)
5330 }
5331
5332 // Tell all goroutines that they have been preempted and they should stop.
5333 // This function is purely best-effort. It can fail to inform a goroutine if a
5334 // processor just started running it.
5335 // No locks need to be held.
5336 // Returns true if preemption request was issued to at least one goroutine.
5337 func preemptall() bool {
5338 res := false
5339 for _, _p_ := range allp {
5340 if _p_.status != _Prunning {
5341 continue
5342 }
5343 if preemptone(_p_) {
5344 res = true
5345 }
5346 }
5347 return res
5348 }
5349 // Tell the goroutine running on processor P to stop.
5350 // This function is purely best-effort. It can incorrectly fail to inform the
5351 // goroutine. It can inform the wrong goroutine. Even if it informs the
5352 // correct goroutine, that goroutine might ignore the request if it is
5353 // simultaneously executing newstack.
5354 // No lock needs to be held.
5355 // Returns true if preemption request was issued.
5356 // The actual preemption will happen at some point in the future
5357 // and will be indicated by the gp->status no longer being
5358 // Grunning.
5359
5360 func preemptone(_p_ *p) bool {
5361 mp := _p_.m.ptr()
5362 if mp == nil || mp == getg().m {
5363 return false
5364 }
5365 gp := mp.curg
5366 if gp == nil || gp == mp.g0 {
5367 return false
5368 }
5369
5370 gp.preempt = true
5371
5372
5373
5374
5375
5376 gp.stackguard0 = stackPreempt
5377
5378
5379 if preemptMSupported && debug.asyncpreemptoff == 0 {
5380 _p_.preempt = true
5381 preemptM(mp)
5382 }
5383
5384 return true
5385 }
5386
5387 var starttime int64
5388
5389 func schedtrace(detailed bool) {
5390 now := nanotime()
5391 if starttime == 0 {
5392 starttime = now
5393 }
5394
5395 lock(&sched.lock)
5396 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5397 if detailed {
5398 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
5399 }
5400
5401
5402
5403 for i, _p_ := range allp {
5404 mp := _p_.m.ptr()
5405 h := atomic.Load(&_p_.runqhead)
5406 t := atomic.Load(&_p_.runqtail)
5407 if detailed {
5408 id := int64(-1)
5409 if mp != nil {
5410 id = mp.id
5411 }
5412 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
5413 } else {
5414
5415
5416 print(" ")
5417 if i == 0 {
5418 print("[")
5419 }
5420 print(t - h)
5421 if i == len(allp)-1 {
5422 print("]\n")
5423 }
5424 }
5425 }
5426
5427 if !detailed {
5428 unlock(&sched.lock)
5429 return
5430 }
5431
5432 for mp := allm; mp != nil; mp = mp.alllink {
5433 _p_ := mp.p.ptr()
5434 gp := mp.curg
5435 lockedg := mp.lockedg.ptr()
5436 id1 := int32(-1)
5437 if _p_ != nil {
5438 id1 = _p_.id
5439 }
5440 id2 := int64(-1)
5441 if gp != nil {
5442 id2 = gp.goid
5443 }
5444 id3 := int64(-1)
5445 if lockedg != nil {
5446 id3 = lockedg.goid
5447 }
5448 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
5449 }
5450
5451 lock(&allglock)
5452 for gi := 0; gi < len(allgs); gi++ {
5453 gp := allgs[gi]
5454 mp := gp.m
5455 lockedm := gp.lockedm.ptr()
5456 id1 := int64(-1)
5457 if mp != nil {
5458 id1 = mp.id
5459 }
5460 id2 := int64(-1)
5461 if lockedm != nil {
5462 id2 = lockedm.id
5463 }
5464 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
5465 }
5466 unlock(&allglock)
5467 unlock(&sched.lock)
5468 }
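// schedtrace output is enabled with the schedtrace (and optionally scheddetail)
// GODEBUG settings, e.g. (illustrative invocations):
//
//	GODEBUG=schedtrace=1000 ./prog               // one summary line per second
//	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // plus per-P, per-M, per-G detail
//
// A summary line, built by the print call above, looks roughly like (values made up):
//
//	SCHED 2013ms: gomaxprocs=8 idleprocs=7 threads=12 spinningthreads=0 idlethreads=5 runqueue=0 [1 0 0 0 0 0 0 0]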
5469
5470 // schedEnableUser enables or disables the scheduling of user
5471 // goroutines.
5472 //
5473 // This does not stop already running user goroutines, so the caller
5474 // should first stop the world when disabling user goroutines.
5475 func schedEnableUser(enable bool) {
5476 lock(&sched.lock)
5477 if sched.disable.user == !enable {
5478 unlock(&sched.lock)
5479 return
5480 }
5481 sched.disable.user = !enable
5482 if enable {
5483 n := sched.disable.n
5484 sched.disable.n = 0
5485 globrunqputbatch(&sched.disable.runnable, n)
5486 unlock(&sched.lock)
5487 for ; n != 0 && sched.npidle != 0; n-- {
5488 startm(nil, false)
5489 }
5490 } else {
5491 unlock(&sched.lock)
5492 }
5493 }
5494
5495
5496
5497
5498
5499 func schedEnabled(gp *g) bool {
5500 assertLockHeld(&sched.lock)
5501
5502 if sched.disable.user {
5503 return isSystemGoroutine(gp, true)
5504 }
5505 return true
5506 }
5507
5508
5509
5510
5511
5512 func mput(mp *m) {
5513 assertLockHeld(&sched.lock)
5514
5515 mp.schedlink = sched.midle
5516 sched.midle.set(mp)
5517 sched.nmidle++
5518 checkdead()
5519 }
5520
5521
5522
5523
5524
5525 func mget() *m {
5526 assertLockHeld(&sched.lock)
5527
5528 mp := sched.midle.ptr()
5529 if mp != nil {
5530 sched.midle = mp.schedlink
5531 sched.nmidle--
5532 }
5533 return mp
5534 }
5535
5536
5537
5538
5539
5540 func globrunqput(gp *g) {
5541 assertLockHeld(&sched.lock)
5542
5543 sched.runq.pushBack(gp)
5544 sched.runqsize++
5545 }
5546
5547
5548
5549
5550
5551 func globrunqputhead(gp *g) {
5552 assertLockHeld(&sched.lock)
5553
5554 sched.runq.push(gp)
5555 sched.runqsize++
5556 }
5557
5558
5559
5560
5561 func globrunqputbatch(batch *gQueue, n int32) {
5562 assertLockHeld(&sched.lock)
5563
5564 sched.runq.pushBackAll(*batch)
5565 sched.runqsize += n
5566 *batch = gQueue{}
5567 }
5568
5569
5570
5571 func globrunqget(_p_ *p, max int32) *g {
5572 assertLockHeld(&sched.lock)
5573
5574 if sched.runqsize == 0 {
5575 return nil
5576 }
5577
5578 n := sched.runqsize/gomaxprocs + 1
5579 if n > sched.runqsize {
5580 n = sched.runqsize
5581 }
5582 if max > 0 && n > max {
5583 n = max
5584 }
5585 if n > int32(len(_p_.runq))/2 {
5586 n = int32(len(_p_.runq)) / 2
5587 }
5588
5589 sched.runqsize -= n
5590
5591 gp := sched.runq.pop()
5592 n--
5593 for ; n > 0; n-- {
5594 gp1 := sched.runq.pop()
5595 runqput(_p_, gp1, false)
5596 }
5597 return gp
5598 }
5599
5600 // pMask is an atomic bitstring with one bit per P.
5601 type pMask []uint32
5602
5603
5604 func (p pMask) read(id uint32) bool {
5605 word := id / 32
5606 mask := uint32(1) << (id % 32)
5607 return (atomic.Load(&p[word]) & mask) != 0
5608 }
5609
5610
5611 func (p pMask) set(id int32) {
5612 word := id / 32
5613 mask := uint32(1) << (id % 32)
5614 atomic.Or(&p[word], mask)
5615 }
5616
5617
5618 func (p pMask) clear(id int32) {
5619 word := id / 32
5620 mask := uint32(1) << (id % 32)
5621 atomic.And(&p[word], ^mask)
5622 }
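// The word/bit arithmetic above is the usual packed-bitmap pattern; the runtime
// version uses atomics so sysmon and spinning Ms can consult the masks without
// holding sched.lock. A stand-alone, non-atomic sketch of the same arithmetic
// (illustrative, not runtime code):
//
//	package main
//
//	import "fmt"
//
//	type mask []uint32
//
//	func (m mask) set(id int)       { m[id/32] |= 1 << (id % 32) }
//	func (m mask) clear(id int)     { m[id/32] &^= 1 << (id % 32) }
//	func (m mask) read(id int) bool { return m[id/32]&(1<<(id%32)) != 0 }
//
//	func main() {
//		m := make(mask, (40+31)/32) // room for 40 Ps -> 2 words
//		m.set(33)
//		fmt.Println(m.read(33), m.read(34)) // true false
//	}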
5623
5624
5625
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649 func updateTimerPMask(pp *p) {
5650 if atomic.Load(&pp.numTimers) > 0 {
5651 return
5652 }
5653
5654
5655
5656
5657 lock(&pp.timersLock)
5658 if atomic.Load(&pp.numTimers) == 0 {
5659 timerpMask.clear(pp.id)
5660 }
5661 unlock(&pp.timersLock)
5662 }
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673 func pidleput(_p_ *p) {
5674 assertLockHeld(&sched.lock)
5675
5676 if !runqempty(_p_) {
5677 throw("pidleput: P has non-empty run queue")
5678 }
5679 updateTimerPMask(_p_)
5680 idlepMask.set(_p_.id)
5681 _p_.link = sched.pidle
5682 sched.pidle.set(_p_)
5683 atomic.Xadd(&sched.npidle, 1)
5684 }
5685
5686
5687
5688
5689
5690
5691
5692 func pidleget() *p {
5693 assertLockHeld(&sched.lock)
5694
5695 _p_ := sched.pidle.ptr()
5696 if _p_ != nil {
5697
5698 timerpMask.set(_p_.id)
5699 idlepMask.clear(_p_.id)
5700 sched.pidle = _p_.link
5701 atomic.Xadd(&sched.npidle, -1)
5702 }
5703 return _p_
5704 }
5705
5706 // runqempty reports whether _p_ has no Gs on its local run queue.
5707 // It never returns true spuriously.
5708 func runqempty(_p_ *p) bool {
5709
5710
5711
5712
5713 for {
5714 head := atomic.Load(&_p_.runqhead)
5715 tail := atomic.Load(&_p_.runqtail)
5716 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5717 if tail == atomic.Load(&_p_.runqtail) {
5718 return head == tail && runnext == 0
5719 }
5720 }
5721 }
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731
5732 const randomizeScheduler = raceenabled
5733
5734 // runqput tries to put g on the local runnable queue.
5735 // If next is false, runqput adds g to the tail of the runnable queue.
5736 // If next is true, runqput puts g in the _p_.runnext slot.
5737 // If the run queue is full, runnext puts g on the global queue.
5738 // Executed only by the owner P.
5739 func runqput(_p_ *p, gp *g, next bool) {
5740 if randomizeScheduler && next && fastrand()%2 == 0 {
5741 next = false
5742 }
5743
5744 if next {
5745 retryNext:
5746 oldnext := _p_.runnext
5747 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5748 goto retryNext
5749 }
5750 if oldnext == 0 {
5751 return
5752 }
5753
5754 gp = oldnext.ptr()
5755 }
5756
5757 retry:
5758 h := atomic.LoadAcq(&_p_.runqhead)
5759 t := _p_.runqtail
5760 if t-h < uint32(len(_p_.runq)) {
5761 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5762 atomic.StoreRel(&_p_.runqtail, t+1)
5763 return
5764 }
5765 if runqputslow(_p_, gp, h, t) {
5766 return
5767 }
5768
5769 goto retry
5770 }
5771
5772
5773
5774 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5775 var batch [len(_p_.runq)/2 + 1]*g
5776
5777
5778 n := t - h
5779 n = n / 2
5780 if n != uint32(len(_p_.runq)/2) {
5781 throw("runqputslow: queue is not full")
5782 }
5783 for i := uint32(0); i < n; i++ {
5784 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5785 }
5786 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
5787 return false
5788 }
5789 batch[n] = gp
5790
5791 if randomizeScheduler {
5792 for i := uint32(1); i <= n; i++ {
5793 j := fastrandn(i + 1)
5794 batch[i], batch[j] = batch[j], batch[i]
5795 }
5796 }
5797
5798
5799 for i := uint32(0); i < n; i++ {
5800 batch[i].schedlink.set(batch[i+1])
5801 }
5802 var q gQueue
5803 q.head.set(batch[0])
5804 q.tail.set(batch[n])
5805
5806
5807 lock(&sched.lock)
5808 globrunqputbatch(&q, int32(n+1))
5809 unlock(&sched.lock)
5810 return true
5811 }
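// A minimal stand-alone sketch (assumed names, not runtime code) of the
// head/tail ring indexing used by the per-P run queue: producers bump tail,
// consumers bump head, and slots are addressed modulo the fixed length; when
// the ring is full the runtime spills half of it to the global queue as above.
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		var ring [8]int
//		var head, tail uint32
//
//		push := func(v int) bool {
//			if tail-head == uint32(len(ring)) {
//				return false // full; runqputslow would spill half here
//			}
//			ring[tail%uint32(len(ring))] = v
//			tail++
//			return true
//		}
//		pop := func() (int, bool) {
//			if head == tail {
//				return 0, false
//			}
//			v := ring[head%uint32(len(ring))]
//			head++
//			return v, true
//		}
//
//		push(1)
//		push(2)
//		fmt.Println(pop()) // 1 true
//		fmt.Println(pop()) // 2 true
//	}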
5812
5813
5814
5815
5816
5817 func runqputbatch(pp *p, q *gQueue, qsize int) {
5818 h := atomic.LoadAcq(&pp.runqhead)
5819 t := pp.runqtail
5820 n := uint32(0)
5821 for !q.empty() && t-h < uint32(len(pp.runq)) {
5822 gp := q.pop()
5823 pp.runq[t%uint32(len(pp.runq))].set(gp)
5824 t++
5825 n++
5826 }
5827 qsize -= int(n)
5828
5829 if randomizeScheduler {
5830 off := func(o uint32) uint32 {
5831 return (pp.runqtail + o) % uint32(len(pp.runq))
5832 }
5833 for i := uint32(1); i < n; i++ {
5834 j := fastrandn(i + 1)
5835 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
5836 }
5837 }
5838
5839 atomic.StoreRel(&pp.runqtail, t)
5840 if !q.empty() {
5841 lock(&sched.lock)
5842 globrunqputbatch(q, int32(qsize))
5843 unlock(&sched.lock)
5844 }
5845 }
5846
5847 // Get g from local runnable queue.
5848 // If inheritTime is true, gp should inherit the remaining time in the
5849 // current time slice. Otherwise, it should start a new time slice.
5850 // Executed only by the owner P.
5851 func runqget(_p_ *p) (gp *g, inheritTime bool) {
5852
5853 for {
5854 next := _p_.runnext
5855 if next == 0 {
5856 break
5857 }
5858 if _p_.runnext.cas(next, 0) {
5859 return next.ptr(), true
5860 }
5861 }
5862
5863 for {
5864 h := atomic.LoadAcq(&_p_.runqhead)
5865 t := _p_.runqtail
5866 if t == h {
5867 return nil, false
5868 }
5869 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
5870 if atomic.CasRel(&_p_.runqhead, h, h+1) {
5871 return gp, false
5872 }
5873 }
5874 }
5875
5876 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
5877 // Batch is a ring buffer starting at batchHead.
5878 // Returns number of grabbed goroutines.
5879 // Can be executed by any P.
5880 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
5881 for {
5882 h := atomic.LoadAcq(&_p_.runqhead)
5883 t := atomic.LoadAcq(&_p_.runqtail)
5884 n := t - h
5885 n = n - n/2
5886 if n == 0 {
5887 if stealRunNextG {
5888
5889 if next := _p_.runnext; next != 0 {
5890 if _p_.status == _Prunning {
5891
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901 if GOOS != "windows" {
5902 usleep(3)
5903 } else {
5904
5905
5906
5907 osyield()
5908 }
5909 }
5910 if !_p_.runnext.cas(next, 0) {
5911 continue
5912 }
5913 batch[batchHead%uint32(len(batch))] = next
5914 return 1
5915 }
5916 }
5917 return 0
5918 }
5919 if n > uint32(len(_p_.runq)/2) {
5920 continue
5921 }
5922 for i := uint32(0); i < n; i++ {
5923 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
5924 batch[(batchHead+i)%uint32(len(batch))] = g
5925 }
5926 if atomic.CasRel(&_p_.runqhead, h, h+n) {
5927 return n
5928 }
5929 }
5930 }
5931
5932
5933
5934
5935 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
5936 t := _p_.runqtail
5937 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
5938 if n == 0 {
5939 return nil
5940 }
5941 n--
5942 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
5943 if n == 0 {
5944 return gp
5945 }
5946 h := atomic.LoadAcq(&_p_.runqhead)
5947 if t-h+n >= uint32(len(_p_.runq)) {
5948 throw("runqsteal: runq overflow")
5949 }
5950 atomic.StoreRel(&_p_.runqtail, t+n)
5951 return gp
5952 }
5953
5954 // A gQueue is a queue of Gs linked through g.schedlink. A G can only
5955 // be on one gQueue or gList at a time.
5956 type gQueue struct {
5957 head guintptr
5958 tail guintptr
5959 }
5960
5961
5962 func (q *gQueue) empty() bool {
5963 return q.head == 0
5964 }
5965
5966
5967 func (q *gQueue) push(gp *g) {
5968 gp.schedlink = q.head
5969 q.head.set(gp)
5970 if q.tail == 0 {
5971 q.tail.set(gp)
5972 }
5973 }
5974
5975
5976 func (q *gQueue) pushBack(gp *g) {
5977 gp.schedlink = 0
5978 if q.tail != 0 {
5979 q.tail.ptr().schedlink.set(gp)
5980 } else {
5981 q.head.set(gp)
5982 }
5983 q.tail.set(gp)
5984 }
5985
5986
5987
5988 func (q *gQueue) pushBackAll(q2 gQueue) {
5989 if q2.tail == 0 {
5990 return
5991 }
5992 q2.tail.ptr().schedlink = 0
5993 if q.tail != 0 {
5994 q.tail.ptr().schedlink = q2.head
5995 } else {
5996 q.head = q2.head
5997 }
5998 q.tail = q2.tail
5999 }
6000
6001
6002
6003 func (q *gQueue) pop() *g {
6004 gp := q.head.ptr()
6005 if gp != nil {
6006 q.head = gp.schedlink
6007 if q.head == 0 {
6008 q.tail = 0
6009 }
6010 }
6011 return gp
6012 }
6013
6014
6015 func (q *gQueue) popList() gList {
6016 stack := gList{q.head}
6017 *q = gQueue{}
6018 return stack
6019 }
6020
6021 // A gList is a list of Gs linked through g.schedlink. A G can only be
6022 // on one gQueue or gList at a time.
6023 type gList struct {
6024 head guintptr
6025 }
6026
6027
6028 func (l *gList) empty() bool {
6029 return l.head == 0
6030 }
6031
6032
6033 func (l *gList) push(gp *g) {
6034 gp.schedlink = l.head
6035 l.head.set(gp)
6036 }
6037
6038
6039 func (l *gList) pushAll(q gQueue) {
6040 if !q.empty() {
6041 q.tail.ptr().schedlink = l.head
6042 l.head = q.head
6043 }
6044 }
6045
6046
6047 func (l *gList) pop() *g {
6048 gp := l.head.ptr()
6049 if gp != nil {
6050 l.head = gp.schedlink
6051 }
6052 return gp
6053 }
6054
6055
6056 func setMaxThreads(in int) (out int) {
6057 lock(&sched.lock)
6058 out = int(sched.maxmcount)
6059 if in > 0x7fffffff {
6060 sched.maxmcount = 0x7fffffff
6061 } else {
6062 sched.maxmcount = int32(in)
6063 }
6064 checkmcount()
6065 unlock(&sched.lock)
6066 return
6067 }
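// setMaxThreads backs runtime/debug.SetMaxThreads. An illustrative call from
// user code (10000 is the documented default limit):
//
//	prev := debug.SetMaxThreads(20000) // allow up to 20000 OS threads
//	_ = prev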
6068
6069 func haveexperiment(name string) bool {
6070 x := sys.Goexperiment
6071 for x != "" {
6072 xname := ""
6073 i := bytealg.IndexByteString(x, ',')
6074 if i < 0 {
6075 xname, x = x, ""
6076 } else {
6077 xname, x = x[:i], x[i+1:]
6078 }
6079 if xname == name {
6080 return true
6081 }
6082 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
6083 return false
6084 }
6085 }
6086 return false
6087 }
6088
6089
6090 func procPin() int {
6091 _g_ := getg()
6092 mp := _g_.m
6093
6094 mp.locks++
6095 return int(mp.p.ptr().id)
6096 }
6097
6098
6099 func procUnpin() {
6100 _g_ := getg()
6101 _g_.m.locks--
6102 }
6103
6104
6105
6106 func sync_runtime_procPin() int {
6107 return procPin()
6108 }
6109
6110
6111
6112 func sync_runtime_procUnpin() {
6113 procUnpin()
6114 }
6115
6116
6117
6118 func sync_atomic_runtime_procPin() int {
6119 return procPin()
6120 }
6121
6122
6123
6124 func sync_atomic_runtime_procUnpin() {
6125 procUnpin()
6126 }
6127
6128
6129
6130
6131 func sync_runtime_canSpin(i int) bool {
6132
6133
6134
6135
6136
6137 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
6138 return false
6139 }
6140 if p := getg().m.p.ptr(); !runqempty(p) {
6141 return false
6142 }
6143 return true
6144 }
6145
6146
6147
6148 func sync_runtime_doSpin() {
6149 procyield(active_spin_cnt)
6150 }
6151
6152 var stealOrder randomOrder
6153
6154 // randomOrder/randomEnum are helper types for randomized work stealing.
6155 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
6156 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
6157 // are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration.
6158 type randomOrder struct {
6159 count uint32
6160 coprimes []uint32
6161 }
6162
6163 type randomEnum struct {
6164 i uint32
6165 count uint32
6166 pos uint32
6167 inc uint32
6168 }
6169
6170 func (ord *randomOrder) reset(count uint32) {
6171 ord.count = count
6172 ord.coprimes = ord.coprimes[:0]
6173 for i := uint32(1); i <= count; i++ {
6174 if gcd(i, count) == 1 {
6175 ord.coprimes = append(ord.coprimes, i)
6176 }
6177 }
6178 }
6179
6180 func (ord *randomOrder) start(i uint32) randomEnum {
6181 return randomEnum{
6182 count: ord.count,
6183 pos: i % ord.count,
6184 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
6185 }
6186 }
6187
6188 func (enum *randomEnum) done() bool {
6189 return enum.i == enum.count
6190 }
6191
6192 func (enum *randomEnum) next() {
6193 enum.i++
6194 enum.pos = (enum.pos + enum.inc) % enum.count
6195 }
6196
6197 func (enum *randomEnum) position() uint32 {
6198 return enum.pos
6199 }
6200
6201 func gcd(a, b uint32) uint32 {
6202 for b != 0 {
6203 a, b = b, a%b
6204 }
6205 return a
6206 }
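// A coprime stride visits every residue exactly once, which is what makes the
// enumeration above cover all Ps without repetition. A stand-alone check
// (illustrative, not runtime code):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const count = 5
//		inc := uint32(3) // any value coprime with count
//		pos := uint32(2) // arbitrary starting P
//		seen := make(map[uint32]bool)
//		for i := 0; i < count; i++ {
//			seen[pos] = true
//			pos = (pos + inc) % count
//		}
//		fmt.Println(len(seen)) // 5: every index visited exactly once
//	}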
6207
6208
6209
6210 type initTask struct {
6211
6212 state uintptr
6213 ndeps uintptr
6214 nfns uintptr
6215
6216
6217 }
6218
6219 // inittrace stores statistics for init functions which are
6220 // updated by malloc and newproc when active is true.
6221 var inittrace tracestat
6222
6223 type tracestat struct {
6224 active bool
6225 id int64
6226 allocs uint64
6227 bytes uint64
6228 }
6229
6230 func doInit(t *initTask) {
6231 switch t.state {
6232 case 2:
6233 return
6234 case 1:
6235 throw("recursive call during initialization - linker skew")
6236 default:
6237 t.state = 1
6238
6239 for i := uintptr(0); i < t.ndeps; i++ {
6240 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
6241 t2 := *(**initTask)(p)
6242 doInit(t2)
6243 }
6244
6245 if t.nfns == 0 {
6246 t.state = 2
6247 return
6248 }
6249
6250 var (
6251 start int64
6252 before tracestat
6253 )
6254
6255 if inittrace.active {
6256 start = nanotime()
6257
6258 before = inittrace
6259 }
6260
6261 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
6262 for i := uintptr(0); i < t.nfns; i++ {
6263 p := add(firstFunc, i*sys.PtrSize)
6264 f := *(*func())(unsafe.Pointer(&p))
6265 f()
6266 }
6267
6268 if inittrace.active {
6269 end := nanotime()
6270
6271 after := inittrace
6272
6273 pkg := funcpkgpath(findfunc(funcPC(firstFunc)))
6274
6275 var sbuf [24]byte
6276 print("init ", pkg, " @")
6277 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6278 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6279 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6280 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6281 print("\n")
6282 }
6283
6284 t.state = 2
6285 }
6286 }
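// The init tracing above is enabled with the inittrace GODEBUG setting, e.g.
// (illustrative invocation; the line format follows the print calls above,
// the values are made up):
//
//	GODEBUG=inittrace=1 ./prog
//	init internal/bytealg @0.008 ms, 0 ms clock, 0 bytes, 0 allocs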
6287