Source file: src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
13 "unsafe"
14 )
15
16
17 var modinfo string
113 var (
114 m0 m
115 g0 g
116 mcache0 *mcache
117 raceprocctx0 uintptr
118 )
119
120
121 var runtime_inittask initTask
122
123
124 var main_inittask initTask
130 var main_init_done chan bool
131
132
133 func main_main()
134
135
136 var mainStarted bool
137
138
139 var runtimeInitTime int64
140
141
142 var initSigmask sigset
143
144
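// main is the goroutine that runs runtime.main. It sets the maximum stack
// size, starts the sysmon thread, runs the runtime and main package init
// tasks, calls main.main, and finally exits the process.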
145 func main() {
146 g := getg()
147
148
149
150 g.m.g0.racectx = 0
151
152
153
154
155 if goarch.PtrSize == 8 {
156 maxstacksize = 1000000000
157 } else {
158 maxstacksize = 250000000
159 }
160
161
162
163
164 maxstackceiling = 2 * maxstacksize
165
166
167 mainStarted = true
168
169 if GOARCH != "wasm" {
170 systemstack(func() {
171 newm(sysmon, nil, -1)
172 })
173 }
181 lockOSThread()
182
183 if g.m != &m0 {
184 throw("runtime.main not on m0")
185 }
186
187
188
189 runtimeInitTime = nanotime()
190 if runtimeInitTime == 0 {
191 throw("nanotime returning zero")
192 }
193
194 if debug.inittrace != 0 {
195 inittrace.id = getg().goid
196 inittrace.active = true
197 }
198
199 doInit(&runtime_inittask)
200
201
202 needUnlock := true
203 defer func() {
204 if needUnlock {
205 unlockOSThread()
206 }
207 }()
208
209 gcenable()
210
211 main_init_done = make(chan bool)
212 if iscgo {
213 if _cgo_thread_start == nil {
214 throw("_cgo_thread_start missing")
215 }
216 if GOOS != "windows" {
217 if _cgo_setenv == nil {
218 throw("_cgo_setenv missing")
219 }
220 if _cgo_unsetenv == nil {
221 throw("_cgo_unsetenv missing")
222 }
223 }
224 if _cgo_notify_runtime_init_done == nil {
225 throw("_cgo_notify_runtime_init_done missing")
226 }
227
228
229 startTemplateThread()
230 cgocall(_cgo_notify_runtime_init_done, nil)
231 }
232
233 doInit(&main_inittask)
234
235
236
237 inittrace.active = false
238
239 close(main_init_done)
240
241 needUnlock = false
242 unlockOSThread()
243
244 if isarchive || islibrary {
245
246
247 return
248 }
249 fn := main_main
250 fn()
251 if raceenabled {
252 racefini()
253 }
259 if atomic.Load(&runningPanicDefers) != 0 {
260
261 for c := 0; c < 1000; c++ {
262 if atomic.Load(&runningPanicDefers) == 0 {
263 break
264 }
265 Gosched()
266 }
267 }
268 if atomic.Load(&panicking) != 0 {
269 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
270 }
271
272 exit(0)
273 for {
274 var x *int32
275 *x = 0
276 }
277 }
278
279
280
281 func os_beforeExit() {
282 if raceenabled {
283 racefini()
284 }
285 }
286
287
288 func init() {
289 go forcegchelper()
290 }
291
292 func forcegchelper() {
293 forcegc.g = getg()
294 lockInit(&forcegc.lock, lockRankForcegc)
295 for {
296 lock(&forcegc.lock)
297 if forcegc.idle != 0 {
298 throw("forcegc: phase error")
299 }
300 atomic.Store(&forcegc.idle, 1)
301 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
302
303 if debug.gctrace > 0 {
304 println("GC forced")
305 }
306
307 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
308 }
309 }
315 func Gosched() {
316 checkTimeouts()
317 mcall(gosched_m)
318 }
319
320
321
322
323 func goschedguarded() {
324 mcall(goschedguarded_m)
325 }
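// gopark puts the current goroutine into a waiting state with the given
// reason. unlockf, if non-nil, is called from the scheduler after the
// goroutine has been parked; if it returns false the goroutine is made
// runnable again immediately. The goroutine is resumed later by goready.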
344 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
345 if reason != waitReasonSleep {
346 checkTimeouts()
347 }
348 mp := acquirem()
349 gp := mp.curg
350 status := readgstatus(gp)
351 if status != _Grunning && status != _Gscanrunning {
352 throw("gopark: bad g status")
353 }
354 mp.waitlock = lock
355 mp.waitunlockf = unlockf
356 gp.waitreason = reason
357 mp.waittraceev = traceEv
358 mp.waittraceskip = traceskip
359 releasem(mp)
360
361 mcall(park_m)
362 }
363
364
365
366 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
367 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
368 }
369
370 func goready(gp *g, traceskip int) {
371 systemstack(func() {
372 ready(gp, traceskip, true)
373 })
374 }
375
376
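// acquireSudog returns a sudog from the per-P cache, refilling half of the
// cache from the central sched.sudogcache (or allocating a new sudog) when
// the local cache is empty.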
377 func acquireSudog() *sudog {
386 mp := acquirem()
387 pp := mp.p.ptr()
388 if len(pp.sudogcache) == 0 {
389 lock(&sched.sudoglock)
390
391 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
392 s := sched.sudogcache
393 sched.sudogcache = s.next
394 s.next = nil
395 pp.sudogcache = append(pp.sudogcache, s)
396 }
397 unlock(&sched.sudoglock)
398
399 if len(pp.sudogcache) == 0 {
400 pp.sudogcache = append(pp.sudogcache, new(sudog))
401 }
402 }
403 n := len(pp.sudogcache)
404 s := pp.sudogcache[n-1]
405 pp.sudogcache[n-1] = nil
406 pp.sudogcache = pp.sudogcache[:n-1]
407 if s.elem != nil {
408 throw("acquireSudog: found s.elem != nil in cache")
409 }
410 releasem(mp)
411 return s
412 }
413
414
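// releaseSudog returns s to the per-P cache after verifying it no longer
// references a channel operation; when the local cache is full, half of it
// is moved to the central sched.sudogcache list.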
415 func releaseSudog(s *sudog) {
416 if s.elem != nil {
417 throw("runtime: sudog with non-nil elem")
418 }
419 if s.isSelect {
420 throw("runtime: sudog with non-false isSelect")
421 }
422 if s.next != nil {
423 throw("runtime: sudog with non-nil next")
424 }
425 if s.prev != nil {
426 throw("runtime: sudog with non-nil prev")
427 }
428 if s.waitlink != nil {
429 throw("runtime: sudog with non-nil waitlink")
430 }
431 if s.c != nil {
432 throw("runtime: sudog with non-nil c")
433 }
434 gp := getg()
435 if gp.param != nil {
436 throw("runtime: releaseSudog with non-nil gp.param")
437 }
438 mp := acquirem()
439 pp := mp.p.ptr()
440 if len(pp.sudogcache) == cap(pp.sudogcache) {
441
442 var first, last *sudog
443 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
444 n := len(pp.sudogcache)
445 p := pp.sudogcache[n-1]
446 pp.sudogcache[n-1] = nil
447 pp.sudogcache = pp.sudogcache[:n-1]
448 if first == nil {
449 first = p
450 } else {
451 last.next = p
452 }
453 last = p
454 }
455 lock(&sched.sudoglock)
456 last.next = sched.sudogcache
457 sched.sudogcache = first
458 unlock(&sched.sudoglock)
459 }
460 pp.sudogcache = append(pp.sudogcache, s)
461 releasem(mp)
462 }
463
464
465 func badmcall(fn func(*g)) {
466 throw("runtime: mcall called on m->g0 stack")
467 }
468
469 func badmcall2(fn func(*g)) {
470 throw("runtime: mcall function returned")
471 }
472
473 func badreflectcall() {
474 panic(plainError("arg size to reflect.call more than 1GB"))
475 }
476
477 var badmorestackg0Msg = "fatal: morestack on g0\n"
478
479
480
481 func badmorestackg0() {
482 sp := stringStructOf(&badmorestackg0Msg)
483 write(2, sp.str, int32(sp.len))
484 }
485
486 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
487
488
489
490 func badmorestackgsignal() {
491 sp := stringStructOf(&badmorestackgsignalMsg)
492 write(2, sp.str, int32(sp.len))
493 }
494
495
496 func badctxt() {
497 throw("ctxt != 0")
498 }
499
500 func lockedOSThread() bool {
501 gp := getg()
502 return gp.lockedm != 0 && gp.m.lockedg != 0
503 }
504
505 var (
512 allglock mutex
513 allgs []*g
527 allglen uintptr
528 allgptr **g
529 )
530
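// allgadd appends gp to allgs and republishes allgptr and allglen atomically
// so that lock-free readers (atomicAllG) always see a consistent prefix of
// the slice.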
531 func allgadd(gp *g) {
532 if readgstatus(gp) == _Gidle {
533 throw("allgadd: bad status Gidle")
534 }
535
536 lock(&allglock)
537 allgs = append(allgs, gp)
538 if &allgs[0] != allgptr {
539 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
540 }
541 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
542 unlock(&allglock)
543 }
544
545
546
547
548 func allGsSnapshot() []*g {
549 assertWorldStoppedOrLockHeld(&allglock)
556 return allgs[:len(allgs):len(allgs)]
557 }
558
559
560 func atomicAllG() (**g, uintptr) {
561 length := atomic.Loaduintptr(&allglen)
562 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
563 return ptr, length
564 }
565
566
567 func atomicAllGIndex(ptr **g, i uintptr) *g {
568 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
569 }
570
571
572
573
574 func forEachG(fn func(gp *g)) {
575 lock(&allglock)
576 for _, gp := range allgs {
577 fn(gp)
578 }
579 unlock(&allglock)
580 }
586 func forEachGRace(fn func(gp *g)) {
587 ptr, length := atomicAllG()
588 for i := uintptr(0); i < length; i++ {
589 gp := atomicAllGIndex(ptr, i)
590 fn(gp)
591 }
592 return
593 }
594
595 const (
596
597
598 _GoidCacheBatch = 16
599 )
600
601
602
603 func cpuinit() {
604 const prefix = "GODEBUG="
605 var env string
606
607 switch GOOS {
608 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
609 cpu.DebugOptions = true
610
611
612
613
614 n := int32(0)
615 for argv_index(argv, argc+1+n) != nil {
616 n++
617 }
618
619 for i := int32(0); i < n; i++ {
620 p := argv_index(argv, argc+1+i)
621 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
622
623 if hasPrefix(s, prefix) {
624 env = gostring(p)[len(prefix):]
625 break
626 }
627 }
628 }
629
630 cpu.Initialize(env)
631
632
633
634 switch GOARCH {
635 case "386", "amd64":
636 x86HasPOPCNT = cpu.X86.HasPOPCNT
637 x86HasSSE41 = cpu.X86.HasSSE41
638 x86HasFMA = cpu.X86.HasFMA
639
640 case "arm":
641 armHasVFPv4 = cpu.ARM.HasVFPv4
642
643 case "arm64":
644 arm64HasATOMICS = cpu.ARM64.HasATOMICS
645 }
646 }
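// schedinit is the scheduler side of runtime bootstrap. It initializes lock
// ranking, stacks, the memory allocator, CPU features, m0, GC state, and the
// argument/environment handling, and finally sizes the P set from GOMAXPROCS
// via procresize.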
656 func schedinit() {
657 lockInit(&sched.lock, lockRankSched)
658 lockInit(&sched.sysmonlock, lockRankSysmon)
659 lockInit(&sched.deferlock, lockRankDefer)
660 lockInit(&sched.sudoglock, lockRankSudog)
661 lockInit(&deadlock, lockRankDeadlock)
662 lockInit(&paniclk, lockRankPanic)
663 lockInit(&allglock, lockRankAllg)
664 lockInit(&allpLock, lockRankAllp)
665 lockInit(&reflectOffs.lock, lockRankReflectOffs)
666 lockInit(&finlock, lockRankFin)
667 lockInit(&trace.bufLock, lockRankTraceBuf)
668 lockInit(&trace.stringsLock, lockRankTraceStrings)
669 lockInit(&trace.lock, lockRankTrace)
670 lockInit(&cpuprof.lock, lockRankCpuprof)
671 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
672
673
674
675 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
676
677
678
679 _g_ := getg()
680 if raceenabled {
681 _g_.racectx, raceprocctx0 = raceinit()
682 }
683
684 sched.maxmcount = 10000
685
686
687 worldStopped()
688
689 moduledataverify()
690 stackinit()
691 mallocinit()
692 cpuinit()
693 alginit()
694 fastrandinit()
695 mcommoninit(_g_.m, -1)
696 modulesinit()
697 typelinksinit()
698 itabsinit()
699 stkobjinit()
700
701 sigsave(&_g_.m.sigmask)
702 initSigmask = _g_.m.sigmask
703
704 if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
705 println(offset)
706 throw("sched.timeToRun not aligned to 8 bytes")
707 }
708
709 goargs()
710 goenvs()
711 parsedebugvars()
712 gcinit()
713
714 lock(&sched.lock)
715 sched.lastpoll = uint64(nanotime())
716 procs := ncpu
717 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
718 procs = n
719 }
720 if procresize(procs) != nil {
721 throw("unknown runnable goroutine during bootstrap")
722 }
723 unlock(&sched.lock)
724
725
726 worldStarted()
727
728
729
730
731 if debug.cgocheck > 1 {
732 writeBarrier.cgo = true
733 writeBarrier.enabled = true
734 for _, p := range allp {
735 p.wbBuf.reset()
736 }
737 }
738
739 if buildVersion == "" {
740
741
742 buildVersion = "unknown"
743 }
744 if len(modinfo) == 1 {
745
746
747 modinfo = ""
748 }
749 }
750
751 func dumpgstatus(gp *g) {
752 _g_ := getg()
753 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
754 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
755 }
756
757
758 func checkmcount() {
759 assertLockHeld(&sched.lock)
760
761 if mcount() > sched.maxmcount {
762 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
763 throw("thread exhaustion")
764 }
765 }
771 func mReserveID() int64 {
772 assertLockHeld(&sched.lock)
773
774 if sched.mnext+1 < sched.mnext {
775 throw("runtime: thread ID overflow")
776 }
777 id := sched.mnext
778 sched.mnext++
779 checkmcount()
780 return id
781 }
782
783
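// mcommoninit performs the OS-independent part of M initialization: it
// assigns the thread ID, seeds mp.fastrand, runs mpreinit, and links mp into
// the allm list.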
784 func mcommoninit(mp *m, id int64) {
785 _g_ := getg()
786
787
788 if _g_ != _g_.m.g0 {
789 callers(1, mp.createstack[:])
790 }
791
792 lock(&sched.lock)
793
794 if id >= 0 {
795 mp.id = id
796 } else {
797 mp.id = mReserveID()
798 }
799
800 lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
801 hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
802 if lo|hi == 0 {
803 hi = 1
804 }
805
806
807 if goarch.BigEndian {
808 mp.fastrand = uint64(lo)<<32 | uint64(hi)
809 } else {
810 mp.fastrand = uint64(hi)<<32 | uint64(lo)
811 }
812
813 mpreinit(mp)
814 if mp.gsignal != nil {
815 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
816 }
817
818
819
820 mp.alllink = allm
821
822
823
824 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
825 unlock(&sched.lock)
826
827
828 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
829 mp.cgoCallers = new(cgoCallers)
830 }
831 }
832
833 var fastrandseed uintptr
834
835 func fastrandinit() {
836 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
837 getRandomData(s)
838 }
839
840
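// ready marks gp, which must be in _Gwaiting, as runnable, places it on the
// current P's run queue, and wakes an idle P if one is available.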
841 func ready(gp *g, traceskip int, next bool) {
842 if trace.enabled {
843 traceGoUnpark(gp, traceskip)
844 }
845
846 status := readgstatus(gp)
847
848
849 _g_ := getg()
850 mp := acquirem()
851 if status&^_Gscan != _Gwaiting {
852 dumpgstatus(gp)
853 throw("bad g->status in ready")
854 }
855
856
857 casgstatus(gp, _Gwaiting, _Grunnable)
858 runqput(_g_.m.p.ptr(), gp, next)
859 wakep()
860 releasem(mp)
861 }
862
863
864
865 const freezeStopWait = 0x7fffffff
866
867
868
869 var freezing uint32
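// freezetheworld is a best-effort attempt to stop all goroutines, used while
// crashing. Unlike stopTheWorld it does not wait for Ps to stop and the
// world is never restarted afterwards.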
874 func freezetheworld() {
875 atomic.Store(&freezing, 1)
876
877
878
879 for i := 0; i < 5; i++ {
880
881 sched.stopwait = freezeStopWait
882 atomic.Store(&sched.gcwaiting, 1)
883
884 if !preemptall() {
885 break
886 }
887 usleep(1000)
888 }
889
890 usleep(1000)
891 preemptall()
892 usleep(1000)
893 }
894
895
896
897
898 func readgstatus(gp *g) uint32 {
899 return atomic.Load(&gp.atomicstatus)
900 }
906 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
907 success := false
908
909
910 switch oldval {
911 default:
912 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
913 dumpgstatus(gp)
914 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
915 case _Gscanrunnable,
916 _Gscanwaiting,
917 _Gscanrunning,
918 _Gscansyscall,
919 _Gscanpreempted:
920 if newval == oldval&^_Gscan {
921 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
922 }
923 }
924 if !success {
925 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
926 dumpgstatus(gp)
927 throw("casfrom_Gscanstatus: gp->status is not in scan state")
928 }
929 releaseLockRank(lockRankGscan)
930 }
931
932
933
934 func castogscanstatus(gp *g, oldval, newval uint32) bool {
935 switch oldval {
936 case _Grunnable,
937 _Grunning,
938 _Gwaiting,
939 _Gsyscall:
940 if newval == oldval|_Gscan {
941 r := atomic.Cas(&gp.atomicstatus, oldval, newval)
942 if r {
943 acquireLockRank(lockRankGscan)
944 }
945 return r
946
947 }
948 }
949 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
950 throw("castogscanstatus")
951 panic("not reached")
952 }
953
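// casgstatus transitions gp from oldval to newval using a CAS loop, yielding
// while the status is temporarily held in a _Gscan state by the garbage
// collector. It also samples how long goroutines spend runnable for the
// scheduler latency metric.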
959 func casgstatus(gp *g, oldval, newval uint32) {
960 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
961 systemstack(func() {
962 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
963 throw("casgstatus: bad incoming values")
964 })
965 }
966
967 acquireLockRank(lockRankGscan)
968 releaseLockRank(lockRankGscan)
969
970
971 const yieldDelay = 5 * 1000
972 var nextYield int64
973
974
975
976 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
977 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
978 throw("casgstatus: waiting for Gwaiting but is Grunnable")
979 }
980 if i == 0 {
981 nextYield = nanotime() + yieldDelay
982 }
983 if nanotime() < nextYield {
984 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
985 procyield(1)
986 }
987 } else {
988 osyield()
989 nextYield = nanotime() + yieldDelay/2
990 }
991 }
992
993
994 if oldval == _Grunning {
995
996 if gp.trackingSeq%gTrackingPeriod == 0 {
997 gp.tracking = true
998 }
999 gp.trackingSeq++
1000 }
1001 if gp.tracking {
1002 if oldval == _Grunnable {
1003
1004
1005
1006 now := nanotime()
1007 gp.runnableTime += now - gp.runnableStamp
1008 gp.runnableStamp = 0
1009 }
1010 if newval == _Grunnable {
1011
1012
1013 now := nanotime()
1014 gp.runnableStamp = now
1015 } else if newval == _Grunning {
1016
1017
1018
1019 gp.tracking = false
1020 sched.timeToRun.record(gp.runnableTime)
1021 gp.runnableTime = 0
1022 }
1023 }
1024 }
1032 func casgcopystack(gp *g) uint32 {
1033 for {
1034 oldstatus := readgstatus(gp) &^ _Gscan
1035 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1036 throw("copystack: bad status, not Gwaiting or Grunnable")
1037 }
1038 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
1039 return oldstatus
1040 }
1041 }
1042 }
1043
1044
1045
1046
1047
1048 func casGToPreemptScan(gp *g, old, new uint32) {
1049 if old != _Grunning || new != _Gscan|_Gpreempted {
1050 throw("bad g transition")
1051 }
1052 acquireLockRank(lockRankGscan)
1053 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
1054 }
1055 }
1056
1057
1058
1059
1060 func casGFromPreempted(gp *g, old, new uint32) bool {
1061 if old != _Gpreempted || new != _Gwaiting {
1062 throw("bad g transition")
1063 }
1064 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
1065 }
1081 func stopTheWorld(reason string) {
1082 semacquire(&worldsema)
1083 gp := getg()
1084 gp.m.preemptoff = reason
1085 systemstack(func() {
1096 casgstatus(gp, _Grunning, _Gwaiting)
1097 stopTheWorldWithSema()
1098 casgstatus(gp, _Gwaiting, _Grunning)
1099 })
1100 }
1101
1102
1103 func startTheWorld() {
1104 systemstack(func() { startTheWorldWithSema(false) })
1121 mp := acquirem()
1122 mp.preemptoff = ""
1123 semrelease1(&worldsema, true, 0)
1124 releasem(mp)
1125 }
1126
1127
1128
1129
1130 func stopTheWorldGC(reason string) {
1131 semacquire(&gcsema)
1132 stopTheWorld(reason)
1133 }
1134
1135
1136 func startTheWorldGC() {
1137 startTheWorld()
1138 semrelease(&gcsema)
1139 }
1140
1141
1142 var worldsema uint32 = 1
1150 var gcsema uint32 = 1
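// stopTheWorldWithSema is the core implementation of stopping the world.
// The caller must hold worldsema and must not hold any other runtime locks.
// It preempts all running goroutines, retakes Ps blocked in syscalls and
// idle Ps, and waits on stopnote until every P has reached _Pgcstop.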
1174 func stopTheWorldWithSema() {
1175 _g_ := getg()
1176
1177
1178
1179 if _g_.m.locks > 0 {
1180 throw("stopTheWorld: holding locks")
1181 }
1182
1183 lock(&sched.lock)
1184 sched.stopwait = gomaxprocs
1185 atomic.Store(&sched.gcwaiting, 1)
1186 preemptall()
1187
1188 _g_.m.p.ptr().status = _Pgcstop
1189 sched.stopwait--
1190
1191 for _, p := range allp {
1192 s := p.status
1193 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1194 if trace.enabled {
1195 traceGoSysBlock(p)
1196 traceProcStop(p)
1197 }
1198 p.syscalltick++
1199 sched.stopwait--
1200 }
1201 }
1202
1203 for {
1204 p := pidleget()
1205 if p == nil {
1206 break
1207 }
1208 p.status = _Pgcstop
1209 sched.stopwait--
1210 }
1211 wait := sched.stopwait > 0
1212 unlock(&sched.lock)
1213
1214
1215 if wait {
1216 for {
1217
1218 if notetsleep(&sched.stopnote, 100*1000) {
1219 noteclear(&sched.stopnote)
1220 break
1221 }
1222 preemptall()
1223 }
1224 }
1225
1226
1227 bad := ""
1228 if sched.stopwait != 0 {
1229 bad = "stopTheWorld: not stopped (stopwait != 0)"
1230 } else {
1231 for _, p := range allp {
1232 if p.status != _Pgcstop {
1233 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1234 }
1235 }
1236 }
1237 if atomic.Load(&freezing) != 0 {
1238
1239
1240
1241
1242 lock(&deadlock)
1243 lock(&deadlock)
1244 }
1245 if bad != "" {
1246 throw(bad)
1247 }
1248
1249 worldStopped()
1250 }
1251
1252 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1253 assertWorldStopped()
1254
1255 mp := acquirem()
1256 if netpollinited() {
1257 list := netpoll(0)
1258 injectglist(&list)
1259 }
1260 lock(&sched.lock)
1261
1262 procs := gomaxprocs
1263 if newprocs != 0 {
1264 procs = newprocs
1265 newprocs = 0
1266 }
1267 p1 := procresize(procs)
1268 sched.gcwaiting = 0
1269 if sched.sysmonwait != 0 {
1270 sched.sysmonwait = 0
1271 notewakeup(&sched.sysmonnote)
1272 }
1273 unlock(&sched.lock)
1274
1275 worldStarted()
1276
1277 for p1 != nil {
1278 p := p1
1279 p1 = p1.link.ptr()
1280 if p.m != 0 {
1281 mp := p.m.ptr()
1282 p.m = 0
1283 if mp.nextp != 0 {
1284 throw("startTheWorld: inconsistent mp->nextp")
1285 }
1286 mp.nextp.set(p)
1287 notewakeup(&mp.park)
1288 } else {
1289
1290 newm(nil, p, -1)
1291 }
1292 }
1293
1294
1295 startTime := nanotime()
1296 if emitTraceEvent {
1297 traceGCSTWDone()
1298 }
1299
1300
1301
1302
1303 wakep()
1304
1305 releasem(mp)
1306
1307 return startTime
1308 }
1309
1310
1311
1312 func usesLibcall() bool {
1313 switch GOOS {
1314 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1315 return true
1316 case "openbsd":
1317 return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
1318 }
1319 return false
1320 }
1321
1322
1323
1324 func mStackIsSystemAllocated() bool {
1325 switch GOOS {
1326 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1327 return true
1328 case "openbsd":
1329 switch GOARCH {
1330 case "386", "amd64", "arm", "arm64":
1331 return true
1332 }
1333 }
1334 return false
1335 }
1336
1337
1338
1339 func mstart()
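// mstart0 is the Go entry point for new Ms (called from the assembly mstart).
// For stacks allocated by the OS it derives approximate stack bounds from the
// current stack pointer, then runs mstart1 and, on return, tears the M down
// with mexit.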
1350 func mstart0() {
1351 _g_ := getg()
1352
1353 osStack := _g_.stack.lo == 0
1354 if osStack {
1355
1356
1357
1358
1359
1360
1361
1362
1363 size := _g_.stack.hi
1364 if size == 0 {
1365 size = 8192 * sys.StackGuardMultiplier
1366 }
1367 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1368 _g_.stack.lo = _g_.stack.hi - size + 1024
1369 }
1370
1371
1372 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1373
1374
1375 _g_.stackguard1 = _g_.stackguard0
1376 mstart1()
1377
1378
1379 if mStackIsSystemAllocated() {
1380
1381
1382
1383 osStack = true
1384 }
1385 mexit(osStack)
1386 }
1387
1388
1389
1390
1391 func mstart1() {
1392 _g_ := getg()
1393
1394 if _g_ != _g_.m.g0 {
1395 throw("bad runtime·mstart")
1396 }
1397
1398
1399
1400
1401
1402
1403
1404 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
1405 _g_.sched.pc = getcallerpc()
1406 _g_.sched.sp = getcallersp()
1407
1408 asminit()
1409 minit()
1410
1411
1412
1413 if _g_.m == &m0 {
1414 mstartm0()
1415 }
1416
1417 if fn := _g_.m.mstartfn; fn != nil {
1418 fn()
1419 }
1420
1421 if _g_.m != &m0 {
1422 acquirep(_g_.m.nextp.ptr())
1423 _g_.m.nextp = 0
1424 }
1425 schedule()
1426 }
1427
1428
1429
1430
1431
1432
1433
1434 func mstartm0() {
1435
1436
1437
1438 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1439 cgoHasExtraM = true
1440 newextram()
1441 }
1442 initsig(false)
1443 }
1444
1445
1446
1447 func mPark() {
1448 gp := getg()
1449 notesleep(&gp.m.park)
1450 noteclear(&gp.m.park)
1451 }
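// mexit tears down and exits the current thread. The main thread (m0) cannot
// exit, so it hands off its P and parks forever; any other M hands off its P,
// unlinks itself from allm, and either returns to the OS (system stack) or
// frees its stack via exitThread.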
1463 func mexit(osStack bool) {
1464 g := getg()
1465 m := g.m
1466
1467 if m == &m0 {
1479 handoffp(releasep())
1480 lock(&sched.lock)
1481 sched.nmfreed++
1482 checkdead()
1483 unlock(&sched.lock)
1484 mPark()
1485 throw("locked m0 woke up")
1486 }
1487
1488 sigblock(true)
1489 unminit()
1490
1491
1492 if m.gsignal != nil {
1493 stackfree(m.gsignal.stack)
1494
1495
1496
1497
1498 m.gsignal = nil
1499 }
1500
1501
1502 lock(&sched.lock)
1503 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1504 if *pprev == m {
1505 *pprev = m.alllink
1506 goto found
1507 }
1508 }
1509 throw("m not found in allm")
1510 found:
1511 if !osStack {
1512
1513
1514
1515
1516 atomic.Store(&m.freeWait, 1)
1517
1518
1519
1520
1521 m.freelink = sched.freem
1522 sched.freem = m
1523 }
1524 unlock(&sched.lock)
1525
1526 atomic.Xadd64(&ncgocall, int64(m.ncgocall))
1527
1528
1529 handoffp(releasep())
1530
1531
1532
1533
1534
1535 lock(&sched.lock)
1536 sched.nmfreed++
1537 checkdead()
1538 unlock(&sched.lock)
1539
1540 if GOOS == "darwin" || GOOS == "ios" {
1541
1542
1543 if atomic.Load(&m.signalPending) != 0 {
1544 atomic.Xadd(&pendingPreemptSignals, -1)
1545 }
1546 }
1547
1548
1549
1550 mdestroy(m)
1551
1552 if osStack {
1553
1554
1555 return
1556 }
1557
1558
1559
1560
1561
1562 exitThread(&m.freeWait)
1563 }
1576 func forEachP(fn func(*p)) {
1577 mp := acquirem()
1578 _p_ := getg().m.p.ptr()
1579
1580 lock(&sched.lock)
1581 if sched.safePointWait != 0 {
1582 throw("forEachP: sched.safePointWait != 0")
1583 }
1584 sched.safePointWait = gomaxprocs - 1
1585 sched.safePointFn = fn
1586
1587
1588 for _, p := range allp {
1589 if p != _p_ {
1590 atomic.Store(&p.runSafePointFn, 1)
1591 }
1592 }
1593 preemptall()
1594
1595
1596
1597
1598
1599
1600
1601 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1602 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1603 fn(p)
1604 sched.safePointWait--
1605 }
1606 }
1607
1608 wait := sched.safePointWait > 0
1609 unlock(&sched.lock)
1610
1611
1612 fn(_p_)
1613
1614
1615
1616 for _, p := range allp {
1617 s := p.status
1618 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1619 if trace.enabled {
1620 traceGoSysBlock(p)
1621 traceProcStop(p)
1622 }
1623 p.syscalltick++
1624 handoffp(p)
1625 }
1626 }
1627
1628
1629 if wait {
1630 for {
1631
1632
1633
1634
1635 if notetsleep(&sched.safePointNote, 100*1000) {
1636 noteclear(&sched.safePointNote)
1637 break
1638 }
1639 preemptall()
1640 }
1641 }
1642 if sched.safePointWait != 0 {
1643 throw("forEachP: not done")
1644 }
1645 for _, p := range allp {
1646 if p.runSafePointFn != 0 {
1647 throw("forEachP: P did not run fn")
1648 }
1649 }
1650
1651 lock(&sched.lock)
1652 sched.safePointFn = nil
1653 unlock(&sched.lock)
1654 releasem(mp)
1655 }
1668 func runSafePointFn() {
1669 p := getg().m.p.ptr()
1670
1671
1672
1673 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1674 return
1675 }
1676 sched.safePointFn(p)
1677 lock(&sched.lock)
1678 sched.safePointWait--
1679 if sched.safePointWait == 0 {
1680 notewakeup(&sched.safePointNote)
1681 }
1682 unlock(&sched.lock)
1683 }
1684
1685
1686
1687
1688 var cgoThreadStart unsafe.Pointer
1689
1690 type cgothreadstart struct {
1691 g guintptr
1692 tls *uint64
1693 fn unsafe.Pointer
1694 }
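// allocm allocates a new m that is not yet associated with an OS thread.
// It may temporarily borrow _p_ to allocate, frees g0 stacks left behind by
// previously exited Ms, and gives the new m a g0 with either a system stack
// or an 8 KB Go-allocated stack.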
1705 func allocm(_p_ *p, fn func(), id int64) *m {
1706 allocmLock.rlock()
1707
1708
1709
1710
1711 acquirem()
1712
1713 _g_ := getg()
1714 if _g_.m.p == 0 {
1715 acquirep(_p_)
1716 }
1717
1718
1719
1720 if sched.freem != nil {
1721 lock(&sched.lock)
1722 var newList *m
1723 for freem := sched.freem; freem != nil; {
1724 if freem.freeWait != 0 {
1725 next := freem.freelink
1726 freem.freelink = newList
1727 newList = freem
1728 freem = next
1729 continue
1730 }
1731
1732
1733
1734 systemstack(func() {
1735 stackfree(freem.g0.stack)
1736 })
1737 freem = freem.freelink
1738 }
1739 sched.freem = newList
1740 unlock(&sched.lock)
1741 }
1742
1743 mp := new(m)
1744 mp.mstartfn = fn
1745 mcommoninit(mp, id)
1746
1747
1748
1749 if iscgo || mStackIsSystemAllocated() {
1750 mp.g0 = malg(-1)
1751 } else {
1752 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1753 }
1754 mp.g0.m = mp
1755
1756 if _p_ == _g_.m.p.ptr() {
1757 releasep()
1758 }
1759
1760 releasem(_g_.m)
1761 allocmLock.runlock()
1762 return mp
1763 }
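// needm is called on a thread not created by Go (for example during a cgo
// callback) to borrow an extra M from the extram list. It blocks signals,
// sets up TLS and approximate g0 stack bounds around the caller's stack
// pointer, and moves the callback g from _Gdead to _Gsyscall.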
1799 func needm() {
1800 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1801
1802
1803
1804
1805
1806
1807 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1808 exit(1)
1809 }
1819 var sigmask sigset
1820 sigsave(&sigmask)
1821 sigblock(false)
1822
1823
1824
1825
1826
1827 mp := lockextra(false)
1836 mp.needextram = mp.schedlink == 0
1837 extraMCount--
1838 unlockextra(mp.schedlink.ptr())
1839
1840
1841 mp.sigmask = sigmask
1842
1843
1844
1845 osSetupTLS(mp)
1846
1847
1848
1849
1850
1851
1852 setg(mp.g0)
1853 _g_ := getg()
1854 _g_.stack.hi = getcallersp() + 1024
1855 _g_.stack.lo = getcallersp() - 32*1024
1856 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1857
1858
1859 asminit()
1860 minit()
1861
1862
1863 casgstatus(mp.curg, _Gdead, _Gsyscall)
1864 atomic.Xadd(&sched.ngsys, -1)
1865 }
1866
1867 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1868
1869
1870
1871
1872 func newextram() {
1873 c := atomic.Xchg(&extraMWaiters, 0)
1874 if c > 0 {
1875 for i := uint32(0); i < c; i++ {
1876 oneNewExtraM()
1877 }
1878 } else {
1879
1880 mp := lockextra(true)
1881 unlockextra(mp)
1882 if mp == nil {
1883 oneNewExtraM()
1884 }
1885 }
1886 }
1887
1888
1889 func oneNewExtraM() {
1890
1891
1892
1893
1894
1895 mp := allocm(nil, nil, -1)
1896 gp := malg(4096)
1897 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
1898 gp.sched.sp = gp.stack.hi
1899 gp.sched.sp -= 4 * goarch.PtrSize
1900 gp.sched.lr = 0
1901 gp.sched.g = guintptr(unsafe.Pointer(gp))
1902 gp.syscallpc = gp.sched.pc
1903 gp.syscallsp = gp.sched.sp
1904 gp.stktopsp = gp.sched.sp
1905
1906
1907
1908
1909 casgstatus(gp, _Gidle, _Gdead)
1910 gp.m = mp
1911 mp.curg = gp
1912 mp.lockedInt++
1913 mp.lockedg.set(gp)
1914 gp.lockedm.set(mp)
1915 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1916 if raceenabled {
1917 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
1918 }
1919
1920 allgadd(gp)
1921
1922
1923
1924
1925
1926 atomic.Xadd(&sched.ngsys, +1)
1927
1928
1929 mnext := lockextra(true)
1930 mp.schedlink.set(mnext)
1931 extraMCount++
1932 unlockextra(mp)
1933 }
1958 func dropm() {
1959
1960
1961
1962 mp := getg().m
1963
1964
1965 casgstatus(mp.curg, _Gsyscall, _Gdead)
1966 mp.curg.preemptStop = false
1967 atomic.Xadd(&sched.ngsys, +1)
1968
1969
1970
1971
1972
1973 sigmask := mp.sigmask
1974 sigblock(false)
1975 unminit()
1976
1977 mnext := lockextra(true)
1978 extraMCount++
1979 mp.schedlink.set(mnext)
1980
1981 setg(nil)
1982
1983
1984 unlockextra(mp)
1985
1986 msigrestore(sigmask)
1987 }
1988
1989
1990 func getm() uintptr {
1991 return uintptr(unsafe.Pointer(getg().m))
1992 }
1993
1994 var extram uintptr
1995 var extraMCount uint32
1996 var extraMWaiters uint32
2004 func lockextra(nilokay bool) *m {
2005 const locked = 1
2006
2007 incr := false
2008 for {
2009 old := atomic.Loaduintptr(&extram)
2010 if old == locked {
2011 osyield_no_g()
2012 continue
2013 }
2014 if old == 0 && !nilokay {
2015 if !incr {
2016
2017
2018
2019 atomic.Xadd(&extraMWaiters, 1)
2020 incr = true
2021 }
2022 usleep_no_g(1)
2023 continue
2024 }
2025 if atomic.Casuintptr(&extram, old, locked) {
2026 return (*m)(unsafe.Pointer(old))
2027 }
2028 osyield_no_g()
2029 continue
2030 }
2031 }
2032
2033
2034 func unlockextra(mp *m) {
2035 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
2036 }
2037
2038 var (
2039
2040
2041
2042 allocmLock rwmutex
2043
2044
2045
2046
2047 execLock rwmutex
2048 )
2049
2050
2051
2052
2053 var newmHandoff struct {
2054 lock mutex
2055
2056
2057
2058 newm muintptr
2059
2060
2061
2062 waiting bool
2063 wake note
2064
2065
2066
2067
2068 haveTemplateThread uint32
2069 }
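// newm creates a new M and an OS thread to run it, with fn as the M's start
// function and _p_ as the P it will be bound to. On locked or cgo threads the
// creation is handed off to the template thread so the new thread is cloned
// from a known-good state.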
2077 func newm(fn func(), _p_ *p, id int64) {
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088 acquirem()
2089
2090 mp := allocm(_p_, fn, id)
2091 mp.nextp.set(_p_)
2092 mp.sigmask = initSigmask
2093 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2105 lock(&newmHandoff.lock)
2106 if newmHandoff.haveTemplateThread == 0 {
2107 throw("on a locked thread with no template thread")
2108 }
2109 mp.schedlink = newmHandoff.newm
2110 newmHandoff.newm.set(mp)
2111 if newmHandoff.waiting {
2112 newmHandoff.waiting = false
2113 notewakeup(&newmHandoff.wake)
2114 }
2115 unlock(&newmHandoff.lock)
2116
2117
2118
2119 releasem(getg().m)
2120 return
2121 }
2122 newm1(mp)
2123 releasem(getg().m)
2124 }
2125
2126 func newm1(mp *m) {
2127 if iscgo {
2128 var ts cgothreadstart
2129 if _cgo_thread_start == nil {
2130 throw("_cgo_thread_start missing")
2131 }
2132 ts.g.set(mp.g0)
2133 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2134 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2135 if msanenabled {
2136 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2137 }
2138 if asanenabled {
2139 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2140 }
2141 execLock.rlock()
2142 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2143 execLock.runlock()
2144 return
2145 }
2146 execLock.rlock()
2147 newosproc(mp)
2148 execLock.runlock()
2149 }
2150
2151
2152
2153
2154
2155 func startTemplateThread() {
2156 if GOARCH == "wasm" {
2157 return
2158 }
2159
2160
2161
2162 mp := acquirem()
2163 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2164 releasem(mp)
2165 return
2166 }
2167 newm(templateThread, nil, -1)
2168 releasem(mp)
2169 }
2183 func templateThread() {
2184 lock(&sched.lock)
2185 sched.nmsys++
2186 checkdead()
2187 unlock(&sched.lock)
2188
2189 for {
2190 lock(&newmHandoff.lock)
2191 for newmHandoff.newm != 0 {
2192 newm := newmHandoff.newm.ptr()
2193 newmHandoff.newm = 0
2194 unlock(&newmHandoff.lock)
2195 for newm != nil {
2196 next := newm.schedlink.ptr()
2197 newm.schedlink = 0
2198 newm1(newm)
2199 newm = next
2200 }
2201 lock(&newmHandoff.lock)
2202 }
2203 newmHandoff.waiting = true
2204 noteclear(&newmHandoff.wake)
2205 unlock(&newmHandoff.lock)
2206 notesleep(&newmHandoff.wake)
2207 }
2208 }
2209
2210
2211
2212 func stopm() {
2213 _g_ := getg()
2214
2215 if _g_.m.locks != 0 {
2216 throw("stopm holding locks")
2217 }
2218 if _g_.m.p != 0 {
2219 throw("stopm holding p")
2220 }
2221 if _g_.m.spinning {
2222 throw("stopm spinning")
2223 }
2224
2225 lock(&sched.lock)
2226 mput(_g_.m)
2227 unlock(&sched.lock)
2228 mPark()
2229 acquirep(_g_.m.nextp.ptr())
2230 _g_.m.nextp = 0
2231 }
2232
2233 func mspinning() {
2234
2235 getg().m.spinning = true
2236 }
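// startm schedules some M to run _p_, creating a new M (and reserving its ID)
// if none is parked. If _p_ is nil an idle P is taken; if there is none and
// the caller was spinning, the nmspinning count it incremented is released.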
2249 func startm(_p_ *p, spinning bool) {
2266 mp := acquirem()
2267 lock(&sched.lock)
2268 if _p_ == nil {
2269 _p_ = pidleget()
2270 if _p_ == nil {
2271 unlock(&sched.lock)
2272 if spinning {
2273
2274
2275 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2276 throw("startm: negative nmspinning")
2277 }
2278 }
2279 releasem(mp)
2280 return
2281 }
2282 }
2283 nmp := mget()
2284 if nmp == nil {
2297 id := mReserveID()
2298 unlock(&sched.lock)
2299
2300 var fn func()
2301 if spinning {
2302
2303 fn = mspinning
2304 }
2305 newm(fn, _p_, id)
2306
2307
2308 releasem(mp)
2309 return
2310 }
2311 unlock(&sched.lock)
2312 if nmp.spinning {
2313 throw("startm: m is spinning")
2314 }
2315 if nmp.nextp != 0 {
2316 throw("startm: m has p")
2317 }
2318 if spinning && !runqempty(_p_) {
2319 throw("startm: p has runnable gs")
2320 }
2321
2322 nmp.spinning = spinning
2323 nmp.nextp.set(_p_)
2324 notewakeup(&nmp.park)
2325
2326
2327 releasem(mp)
2328 }
2329
2330
2331
2332
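// handoffp hands off ownership of a P from a syscall or locked M. It starts
// an M to run the P if the P has local work, the global queue is non-empty,
// GC mark work is available, or this is the last running P and the netpoller
// needs a thread; otherwise the P is put on the idle list.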
2333 func handoffp(_p_ *p) {
2334
2335
2336
2337
2338 if !runqempty(_p_) || sched.runqsize != 0 {
2339 startm(_p_, false)
2340 return
2341 }
2342
2343 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2344 startm(_p_, false)
2345 return
2346 }
2347
2348
2349 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2350 startm(_p_, true)
2351 return
2352 }
2353 lock(&sched.lock)
2354 if sched.gcwaiting != 0 {
2355 _p_.status = _Pgcstop
2356 sched.stopwait--
2357 if sched.stopwait == 0 {
2358 notewakeup(&sched.stopnote)
2359 }
2360 unlock(&sched.lock)
2361 return
2362 }
2363 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2364 sched.safePointFn(_p_)
2365 sched.safePointWait--
2366 if sched.safePointWait == 0 {
2367 notewakeup(&sched.safePointNote)
2368 }
2369 }
2370 if sched.runqsize != 0 {
2371 unlock(&sched.lock)
2372 startm(_p_, false)
2373 return
2374 }
2375
2376
2377 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2378 unlock(&sched.lock)
2379 startm(_p_, false)
2380 return
2381 }
2382
2383
2384
2385 when := nobarrierWakeTime(_p_)
2386 pidleput(_p_)
2387 unlock(&sched.lock)
2388
2389 if when != 0 {
2390 wakeNetPoller(when)
2391 }
2392 }
2393
2394
2395
2396 func wakep() {
2397 if atomic.Load(&sched.npidle) == 0 {
2398 return
2399 }
2400
2401 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
2402 return
2403 }
2404 startm(nil, true)
2405 }
2406
2407
2408
2409 func stoplockedm() {
2410 _g_ := getg()
2411
2412 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2413 throw("stoplockedm: inconsistent locking")
2414 }
2415 if _g_.m.p != 0 {
2416
2417 _p_ := releasep()
2418 handoffp(_p_)
2419 }
2420 incidlelocked(1)
2421
2422 mPark()
2423 status := readgstatus(_g_.m.lockedg.ptr())
2424 if status&^_Gscan != _Grunnable {
2425 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2426 dumpgstatus(_g_.m.lockedg.ptr())
2427 throw("stoplockedm: not runnable")
2428 }
2429 acquirep(_g_.m.nextp.ptr())
2430 _g_.m.nextp = 0
2431 }
2432
2433
2434
2435
2436 func startlockedm(gp *g) {
2437 _g_ := getg()
2438
2439 mp := gp.lockedm.ptr()
2440 if mp == _g_.m {
2441 throw("startlockedm: locked to me")
2442 }
2443 if mp.nextp != 0 {
2444 throw("startlockedm: m has p")
2445 }
2446
2447 incidlelocked(-1)
2448 _p_ := releasep()
2449 mp.nextp.set(_p_)
2450 notewakeup(&mp.park)
2451 stopm()
2452 }
2453
2454
2455
2456 func gcstopm() {
2457 _g_ := getg()
2458
2459 if sched.gcwaiting == 0 {
2460 throw("gcstopm: not waiting for gc")
2461 }
2462 if _g_.m.spinning {
2463 _g_.m.spinning = false
2464
2465
2466 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2467 throw("gcstopm: negative nmspinning")
2468 }
2469 }
2470 _p_ := releasep()
2471 lock(&sched.lock)
2472 _p_.status = _Pgcstop
2473 sched.stopwait--
2474 if sched.stopwait == 0 {
2475 notewakeup(&sched.stopnote)
2476 }
2477 unlock(&sched.lock)
2478 stopm()
2479 }
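// execute schedules gp to run on the current M and never returns. It marks
// gp _Grunning, installs its stack guard, and jumps to its saved context
// with gogo. inheritTime is true if gp inherits the remaining time in the
// current time slice, in which case schedtick is not incremented.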
2490 func execute(gp *g, inheritTime bool) {
2491 _g_ := getg()
2492
2493
2494
2495 _g_.m.curg = gp
2496 gp.m = _g_.m
2497 casgstatus(gp, _Grunnable, _Grunning)
2498 gp.waitsince = 0
2499 gp.preempt = false
2500 gp.stackguard0 = gp.stack.lo + _StackGuard
2501 if !inheritTime {
2502 _g_.m.p.ptr().schedtick++
2503 }
2504
2505
2506 hz := sched.profilehz
2507 if _g_.m.profilehz != hz {
2508 setThreadCPUProfiler(hz)
2509 }
2510
2511 if trace.enabled {
2512
2513
2514 if gp.syscallsp != 0 && gp.sysblocktraced {
2515 traceGoSysExit(gp.sysexitticks)
2516 }
2517 traceGoStart()
2518 }
2519
2520 gogo(&gp.sched)
2521 }
2522
2523
2524
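// findrunnable blocks until it finds a runnable goroutine. It tries, in
// order: the local run queue, the global queue, the netpoller, work stealing
// from other Ps, and idle-priority GC mark work; if nothing is found it
// releases its P, parks the M, and tries again.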
2525 func findrunnable() (gp *g, inheritTime bool) {
2526 _g_ := getg()
2527
2528
2529
2530
2531
2532 top:
2533 _p_ := _g_.m.p.ptr()
2534 if sched.gcwaiting != 0 {
2535 gcstopm()
2536 goto top
2537 }
2538 if _p_.runSafePointFn != 0 {
2539 runSafePointFn()
2540 }
2541
2542 now, pollUntil, _ := checkTimers(_p_, 0)
2543
2544 if fingwait && fingwake {
2545 if gp := wakefing(); gp != nil {
2546 ready(gp, 0, true)
2547 }
2548 }
2549 if *cgo_yield != nil {
2550 asmcgocall(*cgo_yield, nil)
2551 }
2552
2553
2554 if gp, inheritTime := runqget(_p_); gp != nil {
2555 return gp, inheritTime
2556 }
2557
2558
2559 if sched.runqsize != 0 {
2560 lock(&sched.lock)
2561 gp := globrunqget(_p_, 0)
2562 unlock(&sched.lock)
2563 if gp != nil {
2564 return gp, false
2565 }
2566 }
2567
2575 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2576 if list := netpoll(0); !list.empty() {
2577 gp := list.pop()
2578 injectglist(&list)
2579 casgstatus(gp, _Gwaiting, _Grunnable)
2580 if trace.enabled {
2581 traceGoUnpark(gp, 0)
2582 }
2583 return gp, false
2584 }
2585 }
2592 procs := uint32(gomaxprocs)
2593 if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
2594 if !_g_.m.spinning {
2595 _g_.m.spinning = true
2596 atomic.Xadd(&sched.nmspinning, 1)
2597 }
2598
2599 gp, inheritTime, tnow, w, newWork := stealWork(now)
2600 now = tnow
2601 if gp != nil {
2602
2603 return gp, inheritTime
2604 }
2605 if newWork {
2606
2607
2608 goto top
2609 }
2610 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2611
2612 pollUntil = w
2613 }
2614 }
2615
2616
2617
2618
2619
2620
2621 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2622 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2623 if node != nil {
2624 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2625 gp := node.gp.ptr()
2626 casgstatus(gp, _Gwaiting, _Grunnable)
2627 if trace.enabled {
2628 traceGoUnpark(gp, 0)
2629 }
2630 return gp, false
2631 }
2632 }
2633
2634
2635
2636
2637
2638 gp, otherReady := beforeIdle(now, pollUntil)
2639 if gp != nil {
2640 casgstatus(gp, _Gwaiting, _Grunnable)
2641 if trace.enabled {
2642 traceGoUnpark(gp, 0)
2643 }
2644 return gp, false
2645 }
2646 if otherReady {
2647 goto top
2648 }
2649
2650
2651
2652
2653
2654 allpSnapshot := allp
2655
2656
2657 idlepMaskSnapshot := idlepMask
2658 timerpMaskSnapshot := timerpMask
2659
2660
2661 lock(&sched.lock)
2662 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2663 unlock(&sched.lock)
2664 goto top
2665 }
2666 if sched.runqsize != 0 {
2667 gp := globrunqget(_p_, 0)
2668 unlock(&sched.lock)
2669 return gp, false
2670 }
2671 if releasep() != _p_ {
2672 throw("findrunnable: wrong p")
2673 }
2674 pidleput(_p_)
2675 unlock(&sched.lock)
2697 wasSpinning := _g_.m.spinning
2698 if _g_.m.spinning {
2699 _g_.m.spinning = false
2700 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2701 throw("findrunnable: negative nmspinning")
2702 }
2703
2704
2705
2706
2707
2708
2709
2710
2711 _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
2712 if _p_ != nil {
2713 acquirep(_p_)
2714 _g_.m.spinning = true
2715 atomic.Xadd(&sched.nmspinning, 1)
2716 goto top
2717 }
2718
2719
2720 _p_, gp = checkIdleGCNoP()
2721 if _p_ != nil {
2722 acquirep(_p_)
2723 _g_.m.spinning = true
2724 atomic.Xadd(&sched.nmspinning, 1)
2725
2726
2727 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2728 casgstatus(gp, _Gwaiting, _Grunnable)
2729 if trace.enabled {
2730 traceGoUnpark(gp, 0)
2731 }
2732 return gp, false
2733 }
2734
2735
2736
2737
2738
2739
2740
2741 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
2742 }
2743
2744
2745 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2746 atomic.Store64(&sched.pollUntil, uint64(pollUntil))
2747 if _g_.m.p != 0 {
2748 throw("findrunnable: netpoll with p")
2749 }
2750 if _g_.m.spinning {
2751 throw("findrunnable: netpoll with spinning")
2752 }
2753 delay := int64(-1)
2754 if pollUntil != 0 {
2755 if now == 0 {
2756 now = nanotime()
2757 }
2758 delay = pollUntil - now
2759 if delay < 0 {
2760 delay = 0
2761 }
2762 }
2763 if faketime != 0 {
2764
2765 delay = 0
2766 }
2767 list := netpoll(delay)
2768 atomic.Store64(&sched.pollUntil, 0)
2769 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2770 if faketime != 0 && list.empty() {
2771
2772
2773 stopm()
2774 goto top
2775 }
2776 lock(&sched.lock)
2777 _p_ = pidleget()
2778 unlock(&sched.lock)
2779 if _p_ == nil {
2780 injectglist(&list)
2781 } else {
2782 acquirep(_p_)
2783 if !list.empty() {
2784 gp := list.pop()
2785 injectglist(&list)
2786 casgstatus(gp, _Gwaiting, _Grunnable)
2787 if trace.enabled {
2788 traceGoUnpark(gp, 0)
2789 }
2790 return gp, false
2791 }
2792 if wasSpinning {
2793 _g_.m.spinning = true
2794 atomic.Xadd(&sched.nmspinning, 1)
2795 }
2796 goto top
2797 }
2798 } else if pollUntil != 0 && netpollinited() {
2799 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2800 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
2801 netpollBreak()
2802 }
2803 }
2804 stopm()
2805 goto top
2806 }
2807
2808
2809
2810
2811
2812 func pollWork() bool {
2813 if sched.runqsize != 0 {
2814 return true
2815 }
2816 p := getg().m.p.ptr()
2817 if !runqempty(p) {
2818 return true
2819 }
2820 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2821 if list := netpoll(0); !list.empty() {
2822 injectglist(&list)
2823 return true
2824 }
2825 }
2826 return false
2827 }
2835 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
2836 pp := getg().m.p.ptr()
2837
2838 ranTimer := false
2839
2840 const stealTries = 4
2841 for i := 0; i < stealTries; i++ {
2842 stealTimersOrRunNextG := i == stealTries-1
2843
2844 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2845 if sched.gcwaiting != 0 {
2846
2847 return nil, false, now, pollUntil, true
2848 }
2849 p2 := allp[enum.position()]
2850 if pp == p2 {
2851 continue
2852 }
2867 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
2868 tnow, w, ran := checkTimers(p2, now)
2869 now = tnow
2870 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2871 pollUntil = w
2872 }
2873 if ran {
2874
2875
2876
2877
2878
2879
2880
2881
2882 if gp, inheritTime := runqget(pp); gp != nil {
2883 return gp, inheritTime, now, pollUntil, ranTimer
2884 }
2885 ranTimer = true
2886 }
2887 }
2888
2889
2890 if !idlepMask.read(enum.position()) {
2891 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
2892 return gp, false, now, pollUntil, ranTimer
2893 }
2894 }
2895 }
2896 }
2897
2898
2899
2900
2901 return nil, false, now, pollUntil, ranTimer
2902 }
2909 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
2910 for id, p2 := range allpSnapshot {
2911 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
2912 lock(&sched.lock)
2913 pp := pidleget()
2914 unlock(&sched.lock)
2915 if pp != nil {
2916 return pp
2917 }
2918
2919
2920 break
2921 }
2922 }
2923
2924 return nil
2925 }
2926
2927
2928
2929
2930 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
2931 for id, p2 := range allpSnapshot {
2932 if timerpMaskSnapshot.read(uint32(id)) {
2933 w := nobarrierWakeTime(p2)
2934 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2935 pollUntil = w
2936 }
2937 }
2938 }
2939
2940 return pollUntil
2941 }
2942
2943
2944
2945
2946
2947 func checkIdleGCNoP() (*p, *g) {
2948
2949
2950 if atomic.Load(&gcBlackenEnabled) == 0 {
2951 return nil, nil
2952 }
2953 if !gcMarkWorkAvailable(nil) {
2954 return nil, nil
2955 }
2974 lock(&sched.lock)
2975 pp := pidleget()
2976 if pp == nil {
2977 unlock(&sched.lock)
2978 return nil, nil
2979 }
2980
2981
2982
2983 if gcBlackenEnabled == 0 {
2984 pidleput(pp)
2985 unlock(&sched.lock)
2986 return nil, nil
2987 }
2988
2989 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2990 if node == nil {
2991 pidleput(pp)
2992 unlock(&sched.lock)
2993 return nil, nil
2994 }
2995
2996 unlock(&sched.lock)
2997
2998 return pp, node.gp.ptr()
2999 }
3000
3001
3002
3003
3004 func wakeNetPoller(when int64) {
3005 if atomic.Load64(&sched.lastpoll) == 0 {
3006
3007
3008
3009
3010 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
3011 if pollerPollUntil == 0 || pollerPollUntil > when {
3012 netpollBreak()
3013 }
3014 } else {
3015
3016
3017 if GOOS != "plan9" {
3018 wakep()
3019 }
3020 }
3021 }
3022
3023 func resetspinning() {
3024 _g_ := getg()
3025 if !_g_.m.spinning {
3026 throw("resetspinning: not a spinning m")
3027 }
3028 _g_.m.spinning = false
3029 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
3030 if int32(nmspinning) < 0 {
3031 throw("findrunnable: negative nmspinning")
3032 }
3033
3034
3035
3036 wakep()
3037 }
3047 func injectglist(glist *gList) {
3048 if glist.empty() {
3049 return
3050 }
3051 if trace.enabled {
3052 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3053 traceGoUnpark(gp, 0)
3054 }
3055 }
3056
3057
3058
3059 head := glist.head.ptr()
3060 var tail *g
3061 qsize := 0
3062 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3063 tail = gp
3064 qsize++
3065 casgstatus(gp, _Gwaiting, _Grunnable)
3066 }
3067
3068
3069 var q gQueue
3070 q.head.set(head)
3071 q.tail.set(tail)
3072 *glist = gList{}
3073
3074 startIdle := func(n int) {
3075 for ; n != 0 && sched.npidle != 0; n-- {
3076 startm(nil, false)
3077 }
3078 }
3079
3080 pp := getg().m.p.ptr()
3081 if pp == nil {
3082 lock(&sched.lock)
3083 globrunqputbatch(&q, int32(qsize))
3084 unlock(&sched.lock)
3085 startIdle(qsize)
3086 return
3087 }
3088
3089 npidle := int(atomic.Load(&sched.npidle))
3090 var globq gQueue
3091 var n int
3092 for n = 0; n < npidle && !q.empty(); n++ {
3093 g := q.pop()
3094 globq.pushBack(g)
3095 }
3096 if n > 0 {
3097 lock(&sched.lock)
3098 globrunqputbatch(&globq, int32(n))
3099 unlock(&sched.lock)
3100 startIdle(n)
3101 qsize -= n
3102 }
3103
3104 if !q.empty() {
3105 runqputbatch(pp, &q, qsize)
3106 }
3107 }
3108
3109
3110
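// schedule performs one round of scheduling: it finds a runnable goroutine
// (trace reader, GC worker, occasionally the global queue for fairness, then
// the local queue, then findrunnable) and executes it. It never returns.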
3111 func schedule() {
3112 _g_ := getg()
3113
3114 if _g_.m.locks != 0 {
3115 throw("schedule: holding locks")
3116 }
3117
3118 if _g_.m.lockedg != 0 {
3119 stoplockedm()
3120 execute(_g_.m.lockedg.ptr(), false)
3121 }
3122
3123
3124
3125 if _g_.m.incgo {
3126 throw("schedule: in cgo")
3127 }
3128
3129 top:
3130 pp := _g_.m.p.ptr()
3131 pp.preempt = false
3132
3133 if sched.gcwaiting != 0 {
3134 gcstopm()
3135 goto top
3136 }
3137 if pp.runSafePointFn != 0 {
3138 runSafePointFn()
3139 }
3140
3141
3142
3143
3144 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3145 throw("schedule: spinning with local work")
3146 }
3147
3148 checkTimers(pp, 0)
3149
3150 var gp *g
3151 var inheritTime bool
3152
3153
3154
3155
3156 tryWakeP := false
3157 if trace.enabled || trace.shutdown {
3158 gp = traceReader()
3159 if gp != nil {
3160 casgstatus(gp, _Gwaiting, _Grunnable)
3161 traceGoUnpark(gp, 0)
3162 tryWakeP = true
3163 }
3164 }
3165 if gp == nil && gcBlackenEnabled != 0 {
3166 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
3167 if gp != nil {
3168 tryWakeP = true
3169 }
3170 }
3171 if gp == nil {
3172
3173
3174
3175 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
3176 lock(&sched.lock)
3177 gp = globrunqget(_g_.m.p.ptr(), 1)
3178 unlock(&sched.lock)
3179 }
3180 }
3181 if gp == nil {
3182 gp, inheritTime = runqget(_g_.m.p.ptr())
3183
3184
3185 }
3186 if gp == nil {
3187 gp, inheritTime = findrunnable()
3188 }
3189
3190
3191
3192
3193 if _g_.m.spinning {
3194 resetspinning()
3195 }
3196
3197 if sched.disable.user && !schedEnabled(gp) {
3198
3199
3200
3201 lock(&sched.lock)
3202 if schedEnabled(gp) {
3203
3204
3205 unlock(&sched.lock)
3206 } else {
3207 sched.disable.runnable.pushBack(gp)
3208 sched.disable.n++
3209 unlock(&sched.lock)
3210 goto top
3211 }
3212 }
3213
3214
3215
3216 if tryWakeP {
3217 wakep()
3218 }
3219 if gp.lockedm != 0 {
3220
3221
3222 startlockedm(gp)
3223 goto top
3224 }
3225
3226 execute(gp, inheritTime)
3227 }
3236 func dropg() {
3237 _g_ := getg()
3238
3239 setMNoWB(&_g_.m.curg.m, nil)
3240 setGNoWB(&_g_.m.curg, nil)
3241 }
3252 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3253
3254
3255 next := int64(atomic.Load64(&pp.timer0When))
3256 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
3257 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3258 next = nextAdj
3259 }
3260
3261 if next == 0 {
3262
3263 return now, 0, false
3264 }
3265
3266 if now == 0 {
3267 now = nanotime()
3268 }
3269 if now < next {
3270
3271
3272
3273
3274 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
3275 return now, next, false
3276 }
3277 }
3278
3279 lock(&pp.timersLock)
3280
3281 if len(pp.timers) > 0 {
3282 adjusttimers(pp, now)
3283 for len(pp.timers) > 0 {
3284
3285
3286 if tw := runtimer(pp, now); tw != 0 {
3287 if tw > 0 {
3288 pollUntil = tw
3289 }
3290 break
3291 }
3292 ran = true
3293 }
3294 }
3295
3296
3297
3298
3299 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
3300 clearDeletedTimers(pp)
3301 }
3302
3303 unlock(&pp.timersLock)
3304
3305 return now, pollUntil, ran
3306 }
3307
3308 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3309 unlock((*mutex)(lock))
3310 return true
3311 }
3312
3313
3314 func park_m(gp *g) {
3315 _g_ := getg()
3316
3317 if trace.enabled {
3318 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
3319 }
3320
3321 casgstatus(gp, _Grunning, _Gwaiting)
3322 dropg()
3323
3324 if fn := _g_.m.waitunlockf; fn != nil {
3325 ok := fn(gp, _g_.m.waitlock)
3326 _g_.m.waitunlockf = nil
3327 _g_.m.waitlock = nil
3328 if !ok {
3329 if trace.enabled {
3330 traceGoUnpark(gp, 2)
3331 }
3332 casgstatus(gp, _Gwaiting, _Grunnable)
3333 execute(gp, true)
3334 }
3335 }
3336 schedule()
3337 }
3338
3339 func goschedImpl(gp *g) {
3340 status := readgstatus(gp)
3341 if status&^_Gscan != _Grunning {
3342 dumpgstatus(gp)
3343 throw("bad g status")
3344 }
3345 casgstatus(gp, _Grunning, _Grunnable)
3346 dropg()
3347 lock(&sched.lock)
3348 globrunqput(gp)
3349 unlock(&sched.lock)
3350
3351 schedule()
3352 }
3353
3354
3355 func gosched_m(gp *g) {
3356 if trace.enabled {
3357 traceGoSched()
3358 }
3359 goschedImpl(gp)
3360 }
3361
3362
3363 func goschedguarded_m(gp *g) {
3364
3365 if !canPreemptM(gp.m) {
3366 gogo(&gp.sched)
3367 }
3368
3369 if trace.enabled {
3370 traceGoSched()
3371 }
3372 goschedImpl(gp)
3373 }
3374
3375 func gopreempt_m(gp *g) {
3376 if trace.enabled {
3377 traceGoPreempt()
3378 }
3379 goschedImpl(gp)
3380 }
3381
3382
3383
3384
3385 func preemptPark(gp *g) {
3386 if trace.enabled {
3387 traceGoPark(traceEvGoBlock, 0)
3388 }
3389 status := readgstatus(gp)
3390 if status&^_Gscan != _Grunning {
3391 dumpgstatus(gp)
3392 throw("bad g status")
3393 }
3394 gp.waitreason = waitReasonPreempted
3395
3396 if gp.asyncSafePoint {
3397
3398
3399
3400 f := findfunc(gp.sched.pc)
3401 if !f.valid() {
3402 throw("preempt at unknown pc")
3403 }
3404 if f.flag&funcFlag_SPWRITE != 0 {
3405 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
3406 throw("preempt SPWRITE")
3407 }
3408 }
3409
3410
3411
3412
3413
3414
3415
3416 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3417 dropg()
3418 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3419 schedule()
3420 }
3421
3422
3423
3424
3425 func goyield() {
3426 checkTimeouts()
3427 mcall(goyield_m)
3428 }
3429
3430 func goyield_m(gp *g) {
3431 if trace.enabled {
3432 traceGoPreempt()
3433 }
3434 pp := gp.m.p.ptr()
3435 casgstatus(gp, _Grunning, _Grunnable)
3436 dropg()
3437 runqput(pp, gp, false)
3438 schedule()
3439 }
3440
3441
3442 func goexit1() {
3443 if raceenabled {
3444 racegoend()
3445 }
3446 if trace.enabled {
3447 traceGoEnd()
3448 }
3449 mcall(goexit0)
3450 }
3451
3452
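// goexit0 finishes execution of gp on g0: it marks gp dead, clears its
// scheduling state, returns its remaining assist credit to the GC, puts the
// g on the free list, and calls schedule to run something else.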
3453 func goexit0(gp *g) {
3454 _g_ := getg()
3455 _p_ := _g_.m.p.ptr()
3456
3457 casgstatus(gp, _Grunning, _Gdead)
3458 gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
3459 if isSystemGoroutine(gp, false) {
3460 atomic.Xadd(&sched.ngsys, -1)
3461 }
3462 gp.m = nil
3463 locked := gp.lockedm != 0
3464 gp.lockedm = 0
3465 _g_.m.lockedg = 0
3466 gp.preemptStop = false
3467 gp.paniconfault = false
3468 gp._defer = nil
3469 gp._panic = nil
3470 gp.writebuf = nil
3471 gp.waitreason = 0
3472 gp.param = nil
3473 gp.labels = nil
3474 gp.timer = nil
3475
3476 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3477
3478
3479
3480 assistWorkPerByte := gcController.assistWorkPerByte.Load()
3481 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3482 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
3483 gp.gcAssistBytes = 0
3484 }
3485
3486 dropg()
3487
3488 if GOARCH == "wasm" {
3489 gfput(_p_, gp)
3490 schedule()
3491 }
3492
3493 if _g_.m.lockedInt != 0 {
3494 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
3495 throw("internal lockOSThread error")
3496 }
3497 gfput(_p_, gp)
3498 if locked {
3499
3500
3501
3502
3503
3504
3505 if GOOS != "plan9" {
3506 gogo(&_g_.m.g0.sched)
3507 } else {
3508
3509
3510 _g_.m.lockedExt = 0
3511 }
3512 }
3513 schedule()
3514 }
3515
3516
3517
3518
3519
3520
3521
3522
3523
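// Annotation: save updates getg().sched to refer to pc and sp so that a later
// gogo will restore them. It may only be called on a user g (never g0 or
// gsignal), and it asserts that sched.ctxt is already nil, since writing it
// here would require a write barrier and save must run without one.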
3524 func save(pc, sp uintptr) {
3525 _g_ := getg()
3526
3527 if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
3528
3529
3530
3531
3532
3533 throw("save on system g not allowed")
3534 }
3535
3536 _g_.sched.pc = pc
3537 _g_.sched.sp = sp
3538 _g_.sched.lr = 0
3539 _g_.sched.ret = 0
3540
3541
3542
3543 if _g_.sched.ctxt != nil {
3544 badctxt()
3545 }
3546 }
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
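// Annotation: reentersyscall is the common syscall-entry path (entersyscall
// is a thin wrapper around it). It saves the caller's pc/sp, moves the
// goroutine to _Gsyscall, and leaves its P in _Psyscall so sysmon can retake
// the P if the syscall runs long. Between saving the state and returning, the
// goroutine must not grow its stack, which is why stackguard0 is poisoned
// with stackPreempt and throwsplit is set.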
3585 func reentersyscall(pc, sp uintptr) {
3586 _g_ := getg()
3587
3588
3589
3590 _g_.m.locks++
3591
3592
3593
3594
3595
3596 _g_.stackguard0 = stackPreempt
3597 _g_.throwsplit = true
3598
3599
3600 save(pc, sp)
3601 _g_.syscallsp = sp
3602 _g_.syscallpc = pc
3603 casgstatus(_g_, _Grunning, _Gsyscall)
3604 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3605 systemstack(func() {
3606 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3607 throw("entersyscall")
3608 })
3609 }
3610
3611 if trace.enabled {
3612 systemstack(traceGoSysCall)
3613
3614
3615
3616 save(pc, sp)
3617 }
3618
3619 if atomic.Load(&sched.sysmonwait) != 0 {
3620 systemstack(entersyscall_sysmon)
3621 save(pc, sp)
3622 }
3623
3624 if _g_.m.p.ptr().runSafePointFn != 0 {
3625
3626 systemstack(runSafePointFn)
3627 save(pc, sp)
3628 }
3629
3630 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3631 _g_.sysblocktraced = true
3632 pp := _g_.m.p.ptr()
3633 pp.m = 0
3634 _g_.m.oldp.set(pp)
3635 _g_.m.p = 0
3636 atomic.Store(&pp.status, _Psyscall)
3637 if sched.gcwaiting != 0 {
3638 systemstack(entersyscall_gcwait)
3639 save(pc, sp)
3640 }
3641
3642 _g_.m.locks--
3643 }
3644
3645
3646
3647
3648
3649
3650
3651 func entersyscall() {
3652 reentersyscall(getcallerpc(), getcallersp())
3653 }
3654
3655 func entersyscall_sysmon() {
3656 lock(&sched.lock)
3657 if atomic.Load(&sched.sysmonwait) != 0 {
3658 atomic.Store(&sched.sysmonwait, 0)
3659 notewakeup(&sched.sysmonnote)
3660 }
3661 unlock(&sched.lock)
3662 }
3663
3664 func entersyscall_gcwait() {
3665 _g_ := getg()
3666 _p_ := _g_.m.oldp.ptr()
3667
3668 lock(&sched.lock)
3669 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
3670 if trace.enabled {
3671 traceGoSysBlock(_p_)
3672 traceProcStop(_p_)
3673 }
3674 _p_.syscalltick++
3675 if sched.stopwait--; sched.stopwait == 0 {
3676 notewakeup(&sched.stopnote)
3677 }
3678 }
3679 unlock(&sched.lock)
3680 }
3681
3682
3683
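// Annotation: entersyscallblock is the same as entersyscall, but for calls
// known to block: instead of leaving the P in _Psyscall for sysmon to retake
// later, it hands the P off to another M immediately via handoffp.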
3684 func entersyscallblock() {
3685 _g_ := getg()
3686
3687 _g_.m.locks++
3688 _g_.throwsplit = true
3689 _g_.stackguard0 = stackPreempt
3690 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3691 _g_.sysblocktraced = true
3692 _g_.m.p.ptr().syscalltick++
3693
3694
3695 pc := getcallerpc()
3696 sp := getcallersp()
3697 save(pc, sp)
3698 _g_.syscallsp = _g_.sched.sp
3699 _g_.syscallpc = _g_.sched.pc
3700 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3701 sp1 := sp
3702 sp2 := _g_.sched.sp
3703 sp3 := _g_.syscallsp
3704 systemstack(func() {
3705 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3706 throw("entersyscallblock")
3707 })
3708 }
3709 casgstatus(_g_, _Grunning, _Gsyscall)
3710 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3711 systemstack(func() {
3712 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3713 throw("entersyscallblock")
3714 })
3715 }
3716
3717 systemstack(entersyscallblock_handoff)
3718
3719
3720 save(getcallerpc(), getcallersp())
3721
3722 _g_.m.locks--
3723 }
3724
3725 func entersyscallblock_handoff() {
3726 if trace.enabled {
3727 traceGoSysCall()
3728 traceGoSysBlock(getg().m.p.ptr())
3729 }
3730 handoffp(releasep())
3731 }
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
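// Annotation: exitsyscall is called when the goroutine returns from its
// system call. The fast path (exitsyscallfast) tries to reacquire the old P
// or grab an idle one without blocking; otherwise the goroutine switches to
// g0 and is queued by exitsyscall0, which may block until a P is available.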
3745 func exitsyscall() {
3746 _g_ := getg()
3747
3748 _g_.m.locks++
3749 if getcallersp() > _g_.syscallsp {
3750 throw("exitsyscall: syscall frame is no longer valid")
3751 }
3752
3753 _g_.waitsince = 0
3754 oldp := _g_.m.oldp.ptr()
3755 _g_.m.oldp = 0
3756 if exitsyscallfast(oldp) {
3757 if trace.enabled {
3758 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3759 systemstack(traceGoStart)
3760 }
3761 }
3762
3763 _g_.m.p.ptr().syscalltick++
3764
3765 casgstatus(_g_, _Gsyscall, _Grunning)
3766
3767
3768
3769 _g_.syscallsp = 0
3770 _g_.m.locks--
3771 if _g_.preempt {
3772
3773 _g_.stackguard0 = stackPreempt
3774 } else {
3775
3776 _g_.stackguard0 = _g_.stack.lo + _StackGuard
3777 }
3778 _g_.throwsplit = false
3779
3780 if sched.disable.user && !schedEnabled(_g_) {
3781
3782 Gosched()
3783 }
3784
3785 return
3786 }
3787
3788 _g_.sysexitticks = 0
3789 if trace.enabled {
3790
3791
3792 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3793 osyield()
3794 }
3795
3796
3797
3798
3799 _g_.sysexitticks = cputicks()
3800 }
3801
3802 _g_.m.locks--
3803
3804
3805 mcall(exitsyscall0)
3806
3807
3808
3809
3810
3811
3812
3813 _g_.syscallsp = 0
3814 _g_.m.p.ptr().syscalltick++
3815 _g_.throwsplit = false
3816 }
3817
3818
3819 func exitsyscallfast(oldp *p) bool {
3820 _g_ := getg()
3821
3822
3823 if sched.stopwait == freezeStopWait {
3824 return false
3825 }
3826
3827
3828 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
3829
3830 wirep(oldp)
3831 exitsyscallfast_reacquired()
3832 return true
3833 }
3834
3835
3836 if sched.pidle != 0 {
3837 var ok bool
3838 systemstack(func() {
3839 ok = exitsyscallfast_pidle()
3840 if ok && trace.enabled {
3841 if oldp != nil {
3842
3843
3844 for oldp.syscalltick == _g_.m.syscalltick {
3845 osyield()
3846 }
3847 }
3848 traceGoSysExit(0)
3849 }
3850 })
3851 if ok {
3852 return true
3853 }
3854 }
3855 return false
3856 }
3857
3858
3859
3860
3861
3862
3863 func exitsyscallfast_reacquired() {
3864 _g_ := getg()
3865 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3866 if trace.enabled {
3867
3868
3869
3870 systemstack(func() {
3871
3872 traceGoSysBlock(_g_.m.p.ptr())
3873
3874 traceGoSysExit(0)
3875 })
3876 }
3877 _g_.m.p.ptr().syscalltick++
3878 }
3879 }
3880
3881 func exitsyscallfast_pidle() bool {
3882 lock(&sched.lock)
3883 _p_ := pidleget()
3884 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3885 atomic.Store(&sched.sysmonwait, 0)
3886 notewakeup(&sched.sysmonnote)
3887 }
3888 unlock(&sched.lock)
3889 if _p_ != nil {
3890 acquirep(_p_)
3891 return true
3892 }
3893 return false
3894 }
3895
3896
3897
3898
3899
3900
3901
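// Annotation: exitsyscall0 is the slow path of exitsyscall, running on g0. It
// makes gp runnable, tries to grab an idle P (respecting schedEnabled), and
// then either runs gp directly, parks a locked M until its goroutine can run,
// or leaves gp on the global run queue and stops this M.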
3902 func exitsyscall0(gp *g) {
3903 casgstatus(gp, _Gsyscall, _Grunnable)
3904 dropg()
3905 lock(&sched.lock)
3906 var _p_ *p
3907 if schedEnabled(gp) {
3908 _p_ = pidleget()
3909 }
3910 var locked bool
3911 if _p_ == nil {
3912 globrunqput(gp)
3913
3914
3915
3916
3917
3918
3919 locked = gp.lockedm != 0
3920 } else if atomic.Load(&sched.sysmonwait) != 0 {
3921 atomic.Store(&sched.sysmonwait, 0)
3922 notewakeup(&sched.sysmonnote)
3923 }
3924 unlock(&sched.lock)
3925 if _p_ != nil {
3926 acquirep(_p_)
3927 execute(gp, false)
3928 }
3929 if locked {
3930
3931
3932
3933
3934 stoplockedm()
3935 execute(gp, false)
3936 }
3937 stopm()
3938 schedule()
3939 }
3940
3941
3942
3943
3944 func syscall_runtime_BeforeFork() {
3945 gp := getg().m.curg
3946
3947
3948
3949
3950 gp.m.locks++
3951 sigsave(&gp.m.sigmask)
3952 sigblock(false)
3953
3954
3955
3956
3957
3958 gp.stackguard0 = stackFork
3959 }
3960
3961
3962
3963
3964 func syscall_runtime_AfterFork() {
3965 gp := getg().m.curg
3966
3967
3968 gp.stackguard0 = gp.stack.lo + _StackGuard
3969
3970 msigrestore(gp.m.sigmask)
3971
3972 gp.m.locks--
3973 }
3974
3975
3976
3977 var inForkedChild bool
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990 func syscall_runtime_AfterForkInChild() {
3991
3992
3993
3994
3995 inForkedChild = true
3996
3997 clearSignalHandlers()
3998
3999
4000
4001 msigrestore(getg().m.sigmask)
4002
4003 inForkedChild = false
4004 }
4005
4006
4007
4008
4009 var pendingPreemptSignals uint32
4010
4011
4012
4013 func syscall_runtime_BeforeExec() {
4014
4015 execLock.lock()
4016
4017
4018
4019 if GOOS == "darwin" || GOOS == "ios" {
4020 for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
4021 osyield()
4022 }
4023 }
4024 }
4025
4026
4027
4028 func syscall_runtime_AfterExec() {
4029 execLock.unlock()
4030 }
4031
4032
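// Annotation: malg allocates a new g along with a stack big enough for
// stacksize bytes. A negative stacksize skips stack allocation entirely;
// callers that set up the stack themselves (such as g0 setup paths) use that
// form.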
4033 func malg(stacksize int32) *g {
4034 newg := new(g)
4035 if stacksize >= 0 {
4036 stacksize = round2(_StackSystem + stacksize)
4037 systemstack(func() {
4038 newg.stack = stackalloc(uint32(stacksize))
4039 })
4040 newg.stackguard0 = newg.stack.lo + _StackGuard
4041 newg.stackguard1 = ^uintptr(0)
4042
4043
4044 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4045 }
4046 return newg
4047 }
4048
4049
4050
4051
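// Annotation: newproc creates a new goroutine running fn and puts it on the
// run queue of the current P; the compiler lowers every go statement to a
// call of newproc. For example, `go f()` becomes roughly a newproc call with
// a funcval describing f, executed on the system stack, followed by wakep to
// kick an idle P once the program has started running main.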
4052 func newproc(fn *funcval) {
4053 gp := getg()
4054 pc := getcallerpc()
4055 systemstack(func() {
4056 newg := newproc1(fn, gp, pc)
4057
4058 _p_ := getg().m.p.ptr()
4059 runqput(_p_, newg, true)
4060
4061 if mainStarted {
4062 wakep()
4063 }
4064 })
4065 }
4066
4067
4068
4069
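// Annotation: newproc1 creates a new g in state _Grunnable, starting at fn.
// callerpc is the address of the go statement that created it, and callergp
// is the goroutine that executed it (used for ancestor tracebacks). The
// caller is responsible for adding the new g to the scheduler; newproc1 runs
// on the system stack.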
4070 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
4071 _g_ := getg()
4072
4073 if fn == nil {
4074 _g_.m.throwing = -1
4075 throw("go of nil func value")
4076 }
4077 acquirem()
4078
4079 _p_ := _g_.m.p.ptr()
4080 newg := gfget(_p_)
4081 if newg == nil {
4082 newg = malg(_StackMin)
4083 casgstatus(newg, _Gidle, _Gdead)
4084 allgadd(newg)
4085 }
4086 if newg.stack.hi == 0 {
4087 throw("newproc1: newg missing stack")
4088 }
4089
4090 if readgstatus(newg) != _Gdead {
4091 throw("newproc1: new g is not Gdead")
4092 }
4093
4094 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
4095 totalSize = alignUp(totalSize, sys.StackAlign)
4096 sp := newg.stack.hi - totalSize
4097 spArg := sp
4098 if usesLR {
4099
4100 *(*uintptr)(unsafe.Pointer(sp)) = 0
4101 prepGoExitFrame(sp)
4102 spArg += sys.MinFrameSize
4103 }
4104
4105 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4106 newg.sched.sp = sp
4107 newg.stktopsp = sp
4108 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4109 newg.sched.g = guintptr(unsafe.Pointer(newg))
4110 gostartcallfn(&newg.sched, fn)
4111 newg.gopc = callerpc
4112 newg.ancestors = saveAncestors(callergp)
4113 newg.startpc = fn.fn
4114 if isSystemGoroutine(newg, false) {
4115 atomic.Xadd(&sched.ngsys, +1)
4116 } else {
4117
4118 if _g_.m.curg != nil {
4119 newg.labels = _g_.m.curg.labels
4120 }
4121 }
4122
4123 newg.trackingSeq = uint8(fastrand())
4124 if newg.trackingSeq%gTrackingPeriod == 0 {
4125 newg.tracking = true
4126 }
4127 casgstatus(newg, _Gdead, _Grunnable)
4128 gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
4129
4130 if _p_.goidcache == _p_.goidcacheend {
4131
4132
4133
4134 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
4135 _p_.goidcache -= _GoidCacheBatch - 1
4136 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
4137 }
4138 newg.goid = int64(_p_.goidcache)
4139 _p_.goidcache++
4140 if raceenabled {
4141 newg.racectx = racegostart(callerpc)
4142 }
4143 if trace.enabled {
4144 traceGoCreate(newg, newg.startpc)
4145 }
4146 releasem(_g_.m)
4147
4148 return newg
4149 }
4150
4151
4152
4153
4154 func saveAncestors(callergp *g) *[]ancestorInfo {
4155
4156 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4157 return nil
4158 }
4159 var callerAncestors []ancestorInfo
4160 if callergp.ancestors != nil {
4161 callerAncestors = *callergp.ancestors
4162 }
4163 n := int32(len(callerAncestors)) + 1
4164 if n > debug.tracebackancestors {
4165 n = debug.tracebackancestors
4166 }
4167 ancestors := make([]ancestorInfo, n)
4168 copy(ancestors[1:], callerAncestors)
4169
4170 var pcs [_TracebackMaxFrames]uintptr
4171 npcs := gcallers(callergp, 0, pcs[:])
4172 ipcs := make([]uintptr, npcs)
4173 copy(ipcs, pcs[:])
4174 ancestors[0] = ancestorInfo{
4175 pcs: ipcs,
4176 goid: callergp.goid,
4177 gopc: callergp.gopc,
4178 }
4179
4180 ancestorsp := new([]ancestorInfo)
4181 *ancestorsp = ancestors
4182 return ancestorsp
4183 }
4184
4185
4186
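// Annotation: gfput puts a dead g on the per-P free goroutine list.
// Non-standard stack sizes are freed immediately, and once the local list
// holds 64 or more gs it is drained down to 32, moving the excess to the
// global free list.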
4187 func gfput(_p_ *p, gp *g) {
4188 if readgstatus(gp) != _Gdead {
4189 throw("gfput: bad status (not Gdead)")
4190 }
4191
4192 stksize := gp.stack.hi - gp.stack.lo
4193
4194 if stksize != _FixedStack {
4195
4196 stackfree(gp.stack)
4197 gp.stack.lo = 0
4198 gp.stack.hi = 0
4199 gp.stackguard0 = 0
4200 }
4201
4202 _p_.gFree.push(gp)
4203 _p_.gFree.n++
4204 if _p_.gFree.n >= 64 {
4205 var (
4206 inc int32
4207 stackQ gQueue
4208 noStackQ gQueue
4209 )
4210 for _p_.gFree.n >= 32 {
4211 gp = _p_.gFree.pop()
4212 _p_.gFree.n--
4213 if gp.stack.lo == 0 {
4214 noStackQ.push(gp)
4215 } else {
4216 stackQ.push(gp)
4217 }
4218 inc++
4219 }
4220 lock(&sched.gFree.lock)
4221 sched.gFree.noStack.pushAll(noStackQ)
4222 sched.gFree.stack.pushAll(stackQ)
4223 sched.gFree.n += inc
4224 unlock(&sched.gFree.lock)
4225 }
4226 }
4227
4228
4229
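// Annotation: gfget takes a g from the per-P free list, refilling up to 32
// entries from the global free list when the local list is empty. A stack is
// allocated for the returned g if it no longer has one; gfget returns nil if
// no free g is available.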
4230 func gfget(_p_ *p) *g {
4231 retry:
4232 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4233 lock(&sched.gFree.lock)
4234
4235 for _p_.gFree.n < 32 {
4236
4237 gp := sched.gFree.stack.pop()
4238 if gp == nil {
4239 gp = sched.gFree.noStack.pop()
4240 if gp == nil {
4241 break
4242 }
4243 }
4244 sched.gFree.n--
4245 _p_.gFree.push(gp)
4246 _p_.gFree.n++
4247 }
4248 unlock(&sched.gFree.lock)
4249 goto retry
4250 }
4251 gp := _p_.gFree.pop()
4252 if gp == nil {
4253 return nil
4254 }
4255 _p_.gFree.n--
4256 if gp.stack.lo == 0 {
4257
4258 systemstack(func() {
4259 gp.stack = stackalloc(_FixedStack)
4260 })
4261 gp.stackguard0 = gp.stack.lo + _StackGuard
4262 } else {
4263 if raceenabled {
4264 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4265 }
4266 if msanenabled {
4267 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4268 }
4269 if asanenabled {
4270 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4271 }
4272 }
4273 return gp
4274 }
4275
4276
4277 func gfpurge(_p_ *p) {
4278 var (
4279 inc int32
4280 stackQ gQueue
4281 noStackQ gQueue
4282 )
4283 for !_p_.gFree.empty() {
4284 gp := _p_.gFree.pop()
4285 _p_.gFree.n--
4286 if gp.stack.lo == 0 {
4287 noStackQ.push(gp)
4288 } else {
4289 stackQ.push(gp)
4290 }
4291 inc++
4292 }
4293 lock(&sched.gFree.lock)
4294 sched.gFree.noStack.pushAll(noStackQ)
4295 sched.gFree.stack.pushAll(stackQ)
4296 sched.gFree.n += inc
4297 unlock(&sched.gFree.lock)
4298 }
4299
4300
4301 func Breakpoint() {
4302 breakpoint()
4303 }
4304
4305
4306
4307
4308
4309 func dolockOSThread() {
4310 if GOARCH == "wasm" {
4311 return
4312 }
4313 _g_ := getg()
4314 _g_.m.lockedg.set(_g_)
4315 _g_.lockedm.set(_g_.m)
4316 }
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334 func LockOSThread() {
4335 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4336
4337
4338
4339 startTemplateThread()
4340 }
4341 _g_ := getg()
4342 _g_.m.lockedExt++
4343 if _g_.m.lockedExt == 0 {
4344 _g_.m.lockedExt--
4345 panic("LockOSThread nesting overflow")
4346 }
4347 dolockOSThread()
4348 }
4349
4350
4351 func lockOSThread() {
4352 getg().m.lockedInt++
4353 dolockOSThread()
4354 }
4355
4356
4357
4358
4359
4360 func dounlockOSThread() {
4361 if GOARCH == "wasm" {
4362 return
4363 }
4364 _g_ := getg()
4365 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
4366 return
4367 }
4368 _g_.m.lockedg = 0
4369 _g_.lockedm = 0
4370 }
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386 func UnlockOSThread() {
4387 _g_ := getg()
4388 if _g_.m.lockedExt == 0 {
4389 return
4390 }
4391 _g_.m.lockedExt--
4392 dounlockOSThread()
4393 }
4394
4395
4396 func unlockOSThread() {
4397 _g_ := getg()
4398 if _g_.m.lockedInt == 0 {
4399 systemstack(badunlockosthread)
4400 }
4401 _g_.m.lockedInt--
4402 dounlockOSThread()
4403 }
4404
4405 func badunlockosthread() {
4406 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4407 }
4408
4409 func gcount() int32 {
4410 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
4411 for _, _p_ := range allp {
4412 n -= _p_.gFree.n
4413 }
4414
4415
4416
4417 if n < 1 {
4418 n = 1
4419 }
4420 return n
4421 }
4422
4423 func mcount() int32 {
4424 return int32(sched.mnext - sched.nmfreed)
4425 }
4426
4427 var prof struct {
4428 signalLock uint32
4429 hz int32
4430 }
4431
4432 func _System() { _System() }
4433 func _ExternalCode() { _ExternalCode() }
4434 func _LostExternalCode() { _LostExternalCode() }
4435 func _GC() { _GC() }
4436 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4437 func _VDSO() { _VDSO() }
4438
4439
4440
4441
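// Annotation: sigprof records a CPU profile sample. It runs from the SIGPROF
// signal handler, so it must not allocate or take locks the interrupted code
// might hold; the mallocing counter is bumped only to trap accidental
// allocation. When an ordinary traceback is impossible it falls back to cgo
// callers, libcall or VDSO state, and finally to a synthetic two-frame stack.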
4442 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4443 if prof.hz == 0 {
4444 return
4445 }
4446
4447
4448
4449
4450 if mp != nil && mp.profilehz == 0 {
4451 return
4452 }
4453
4454
4455
4456
4457
4458
4459
4460 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4461 if f := findfunc(pc); f.valid() {
4462 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4463 cpuprof.lostAtomic++
4464 return
4465 }
4466 }
4467 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
4468
4469
4470
4471 cpuprof.lostAtomic++
4472 return
4473 }
4474 }
4475
4476
4477
4478
4479
4480
4481
4482 getg().m.mallocing++
4483
4484 var stk [maxCPUProfStack]uintptr
4485 n := 0
4486 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4487 cgoOff := 0
4488
4489
4490
4491
4492
4493 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4494 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4495 cgoOff++
4496 }
4497 copy(stk[:], mp.cgoCallers[:cgoOff])
4498 mp.cgoCallers[0] = 0
4499 }
4500
4501
4502 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4503 if n > 0 {
4504 n += cgoOff
4505 }
4506 } else {
4507 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4508 }
4509
4510 if n <= 0 {
4511
4512
4513 n = 0
4514 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4515
4516
4517 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
4518 }
4519 if n == 0 && mp != nil && mp.vdsoSP != 0 {
4520 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4521 }
4522 if n == 0 {
4523
4524 n = 2
4525 if inVDSOPage(pc) {
4526 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
4527 } else if pc > firstmoduledata.etext {
4528
4529 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
4530 }
4531 stk[0] = pc
4532 if mp.preemptoff != "" {
4533 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
4534 } else {
4535 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
4536 }
4537 }
4538 }
4539
4540 if prof.hz != 0 {
4541
4542
4543
4544 var tagPtr *unsafe.Pointer
4545 if gp != nil && gp.m != nil && gp.m.curg != nil {
4546 tagPtr = &gp.m.curg.labels
4547 }
4548 cpuprof.add(tagPtr, stk[:n])
4549 }
4550 getg().m.mallocing--
4551 }
4552
4553
4554
4555 func setcpuprofilerate(hz int32) {
4556
4557 if hz < 0 {
4558 hz = 0
4559 }
4560
4561
4562
4563 _g_ := getg()
4564 _g_.m.locks++
4565
4566
4567
4568
4569 setThreadCPUProfiler(0)
4570
4571 for !atomic.Cas(&prof.signalLock, 0, 1) {
4572 osyield()
4573 }
4574 if prof.hz != hz {
4575 setProcessCPUProfiler(hz)
4576 prof.hz = hz
4577 }
4578 atomic.Store(&prof.signalLock, 0)
4579
4580 lock(&sched.lock)
4581 sched.profilehz = hz
4582 unlock(&sched.lock)
4583
4584 if hz != 0 {
4585 setThreadCPUProfiler(hz)
4586 }
4587
4588 _g_.m.locks--
4589 }
4590
4591
4592
4593 func (pp *p) init(id int32) {
4594 pp.id = id
4595 pp.status = _Pgcstop
4596 pp.sudogcache = pp.sudogbuf[:0]
4597 pp.deferpool = pp.deferpoolbuf[:0]
4598 pp.wbBuf.reset()
4599 if pp.mcache == nil {
4600 if id == 0 {
4601 if mcache0 == nil {
4602 throw("missing mcache?")
4603 }
4604
4605
4606 pp.mcache = mcache0
4607 } else {
4608 pp.mcache = allocmcache()
4609 }
4610 }
4611 if raceenabled && pp.raceprocctx == 0 {
4612 if id == 0 {
4613 pp.raceprocctx = raceprocctx0
4614 raceprocctx0 = 0
4615 } else {
4616 pp.raceprocctx = raceproccreate()
4617 }
4618 }
4619 lockInit(&pp.timersLock, lockRankTimers)
4620
4621
4622
4623 timerpMask.set(id)
4624
4625
4626 idlepMask.clear(id)
4627 }
4628
4629
4630
4631
4632
4633 func (pp *p) destroy() {
4634 assertLockHeld(&sched.lock)
4635 assertWorldStopped()
4636
4637
4638 for pp.runqhead != pp.runqtail {
4639
4640 pp.runqtail--
4641 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4642
4643 globrunqputhead(gp)
4644 }
4645 if pp.runnext != 0 {
4646 globrunqputhead(pp.runnext.ptr())
4647 pp.runnext = 0
4648 }
4649 if len(pp.timers) > 0 {
4650 plocal := getg().m.p.ptr()
4651
4652
4653
4654
4655 lock(&plocal.timersLock)
4656 lock(&pp.timersLock)
4657 moveTimers(plocal, pp.timers)
4658 pp.timers = nil
4659 pp.numTimers = 0
4660 pp.deletedTimers = 0
4661 atomic.Store64(&pp.timer0When, 0)
4662 unlock(&pp.timersLock)
4663 unlock(&plocal.timersLock)
4664 }
4665
4666 if gcphase != _GCoff {
4667 wbBufFlush1(pp)
4668 pp.gcw.dispose()
4669 }
4670 for i := range pp.sudogbuf {
4671 pp.sudogbuf[i] = nil
4672 }
4673 pp.sudogcache = pp.sudogbuf[:0]
4674 for j := range pp.deferpoolbuf {
4675 pp.deferpoolbuf[j] = nil
4676 }
4677 pp.deferpool = pp.deferpoolbuf[:0]
4678 systemstack(func() {
4679 for i := 0; i < pp.mspancache.len; i++ {
4680
4681 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4682 }
4683 pp.mspancache.len = 0
4684 lock(&mheap_.lock)
4685 pp.pcache.flush(&mheap_.pages)
4686 unlock(&mheap_.lock)
4687 })
4688 freemcache(pp.mcache)
4689 pp.mcache = nil
4690 gfpurge(pp)
4691 traceProcFree(pp)
4692 if raceenabled {
4693 if pp.timerRaceCtx != 0 {
4694
4695
4696
4697
4698
4699 mp := getg().m
4700 phold := mp.p.ptr()
4701 mp.p.set(pp)
4702
4703 racectxend(pp.timerRaceCtx)
4704 pp.timerRaceCtx = 0
4705
4706 mp.p.set(phold)
4707 }
4708 raceprocdestroy(pp.raceprocctx)
4709 pp.raceprocctx = 0
4710 }
4711 pp.gcAssistTime = 0
4712 pp.status = _Pdead
4713 }
4714
4715
4716
4717
4718
4719
4720
4721
4722
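// Annotation: procresize changes the number of processors to nprocs.
// sched.lock must be held and the world must be stopped. It grows or shrinks
// allp and the idle/timer P masks, initializes any new Ps, destroys discarded
// ones, and returns the list of Ps with local work, which the caller must
// start.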
4723 func procresize(nprocs int32) *p {
4724 assertLockHeld(&sched.lock)
4725 assertWorldStopped()
4726
4727 old := gomaxprocs
4728 if old < 0 || nprocs <= 0 {
4729 throw("procresize: invalid arg")
4730 }
4731 if trace.enabled {
4732 traceGomaxprocs(nprocs)
4733 }
4734
4735
4736 now := nanotime()
4737 if sched.procresizetime != 0 {
4738 sched.totaltime += int64(old) * (now - sched.procresizetime)
4739 }
4740 sched.procresizetime = now
4741
4742 maskWords := (nprocs + 31) / 32
4743
4744
4745 if nprocs > int32(len(allp)) {
4746
4747
4748 lock(&allpLock)
4749 if nprocs <= int32(cap(allp)) {
4750 allp = allp[:nprocs]
4751 } else {
4752 nallp := make([]*p, nprocs)
4753
4754
4755 copy(nallp, allp[:cap(allp)])
4756 allp = nallp
4757 }
4758
4759 if maskWords <= int32(cap(idlepMask)) {
4760 idlepMask = idlepMask[:maskWords]
4761 timerpMask = timerpMask[:maskWords]
4762 } else {
4763 nidlepMask := make([]uint32, maskWords)
4764
4765 copy(nidlepMask, idlepMask)
4766 idlepMask = nidlepMask
4767
4768 ntimerpMask := make([]uint32, maskWords)
4769 copy(ntimerpMask, timerpMask)
4770 timerpMask = ntimerpMask
4771 }
4772 unlock(&allpLock)
4773 }
4774
4775
4776 for i := old; i < nprocs; i++ {
4777 pp := allp[i]
4778 if pp == nil {
4779 pp = new(p)
4780 }
4781 pp.init(i)
4782 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
4783 }
4784
4785 _g_ := getg()
4786 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
4787
4788 _g_.m.p.ptr().status = _Prunning
4789 _g_.m.p.ptr().mcache.prepareForSweep()
4790 } else {
4791
4792
4793
4794
4795
4796 if _g_.m.p != 0 {
4797 if trace.enabled {
4798
4799
4800
4801 traceGoSched()
4802 traceProcStop(_g_.m.p.ptr())
4803 }
4804 _g_.m.p.ptr().m = 0
4805 }
4806 _g_.m.p = 0
4807 p := allp[0]
4808 p.m = 0
4809 p.status = _Pidle
4810 acquirep(p)
4811 if trace.enabled {
4812 traceGoStart()
4813 }
4814 }
4815
4816
4817 mcache0 = nil
4818
4819
4820 for i := nprocs; i < old; i++ {
4821 p := allp[i]
4822 p.destroy()
4823
4824 }
4825
4826
4827 if int32(len(allp)) != nprocs {
4828 lock(&allpLock)
4829 allp = allp[:nprocs]
4830 idlepMask = idlepMask[:maskWords]
4831 timerpMask = timerpMask[:maskWords]
4832 unlock(&allpLock)
4833 }
4834
4835 var runnablePs *p
4836 for i := nprocs - 1; i >= 0; i-- {
4837 p := allp[i]
4838 if _g_.m.p.ptr() == p {
4839 continue
4840 }
4841 p.status = _Pidle
4842 if runqempty(p) {
4843 pidleput(p)
4844 } else {
4845 p.m.set(mget())
4846 p.link.set(runnablePs)
4847 runnablePs = p
4848 }
4849 }
4850 stealOrder.reset(uint32(nprocs))
4851 var int32p *int32 = &gomaxprocs
4852 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
4853 return runnablePs
4854 }
4855
4856
4857
4858
4859
4860
4861
4862 func acquirep(_p_ *p) {
4863
4864 wirep(_p_)
4865
4866
4867
4868
4869
4870 _p_.mcache.prepareForSweep()
4871
4872 if trace.enabled {
4873 traceProcStart()
4874 }
4875 }
4876
4877
4878
4879
4880
4881
4882
4883 func wirep(_p_ *p) {
4884 _g_ := getg()
4885
4886 if _g_.m.p != 0 {
4887 throw("wirep: already in go")
4888 }
4889 if _p_.m != 0 || _p_.status != _Pidle {
4890 id := int64(0)
4891 if _p_.m != 0 {
4892 id = _p_.m.ptr().id
4893 }
4894 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
4895 throw("wirep: invalid p state")
4896 }
4897 _g_.m.p.set(_p_)
4898 _p_.m.set(_g_.m)
4899 _p_.status = _Prunning
4900 }
4901
4902
4903 func releasep() *p {
4904 _g_ := getg()
4905
4906 if _g_.m.p == 0 {
4907 throw("releasep: invalid arg")
4908 }
4909 _p_ := _g_.m.p.ptr()
4910 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
4911 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
4912 throw("releasep: invalid p state")
4913 }
4914 if trace.enabled {
4915 traceProcStop(_g_.m.p.ptr())
4916 }
4917 _g_.m.p = 0
4918 _p_.m = 0
4919 _p_.status = _Pidle
4920 return _p_
4921 }
4922
4923 func incidlelocked(v int32) {
4924 lock(&sched.lock)
4925 sched.nmidlelocked += v
4926 if v > 0 {
4927 checkdead()
4928 }
4929 unlock(&sched.lock)
4930 }
4931
4932
4933
4934
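// Annotation: checkdead reports deadlock when it looks like all goroutines
// are blocked: no Ms are running user code and no runnable goroutines exist.
// It must be called with sched.lock held. It deliberately does nothing for
// c-archive/c-shared builds or while a panic is in flight, and it accounts
// for goroutines blocked only on timers (including the faketime test mode).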
4935 func checkdead() {
4936 assertLockHeld(&sched.lock)
4937
4938
4939
4940
4941 if islibrary || isarchive {
4942 return
4943 }
4944
4945
4946
4947
4948
4949 if panicking > 0 {
4950 return
4951 }
4952
4953
4954
4955
4956
4957 var run0 int32
4958 if !iscgo && cgoHasExtraM {
4959 mp := lockextra(true)
4960 haveExtraM := extraMCount > 0
4961 unlockextra(mp)
4962 if haveExtraM {
4963 run0 = 1
4964 }
4965 }
4966
4967 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
4968 if run > run0 {
4969 return
4970 }
4971 if run < 0 {
4972 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
4973 throw("checkdead: inconsistent counts")
4974 }
4975
4976 grunning := 0
4977 forEachG(func(gp *g) {
4978 if isSystemGoroutine(gp, false) {
4979 return
4980 }
4981 s := readgstatus(gp)
4982 switch s &^ _Gscan {
4983 case _Gwaiting,
4984 _Gpreempted:
4985 grunning++
4986 case _Grunnable,
4987 _Grunning,
4988 _Gsyscall:
4989 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
4990 throw("checkdead: runnable g")
4991 }
4992 })
4993 if grunning == 0 {
4994 unlock(&sched.lock)
4995 throw("no goroutines (main called runtime.Goexit) - deadlock!")
4996 }
4997
4998
4999 if faketime != 0 {
5000 when, _p_ := timeSleepUntil()
5001 if _p_ != nil {
5002 faketime = when
5003 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
5004 if (*pp).ptr() == _p_ {
5005 *pp = _p_.link
5006 break
5007 }
5008 }
5009 mp := mget()
5010 if mp == nil {
5011
5012
5013 throw("checkdead: no m for timer")
5014 }
5015 mp.nextp.set(_p_)
5016 notewakeup(&mp.park)
5017 return
5018 }
5019 }
5020
5021
5022 for _, _p_ := range allp {
5023 if len(_p_.timers) > 0 {
5024 return
5025 }
5026 }
5027
5028 getg().m.throwing = -1
5029 unlock(&sched.lock)
5030 throw("all goroutines are asleep - deadlock!")
5031 }
5032
5033
5034
5035
5036
5037
5038 var forcegcperiod int64 = 2 * 60 * 1e9
5039
5040
5041
5042 var needSysmonWorkaround bool = false
5043
5044
5045
5046
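// Annotation: sysmon is the system monitor, running on a dedicated M without
// a P (so write barriers are not allowed). It loops forever: sleeping
// adaptively, polling the network if nothing else has for 10ms, retaking Ps
// stuck in syscalls or running too long (retake), waking the scavenger, and
// kicking off forced GCs when the time-based GC trigger fires.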
5047 func sysmon() {
5048 lock(&sched.lock)
5049 sched.nmsys++
5050 checkdead()
5051 unlock(&sched.lock)
5052
5053 lasttrace := int64(0)
5054 idle := 0
5055 delay := uint32(0)
5056
5057 for {
5058 if idle == 0 {
5059 delay = 20
5060 } else if idle > 50 {
5061 delay *= 2
5062 }
5063 if delay > 10*1000 {
5064 delay = 10 * 1000
5065 }
5066 usleep(delay)
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083 now := nanotime()
5084 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
5085 lock(&sched.lock)
5086 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
5087 syscallWake := false
5088 next, _ := timeSleepUntil()
5089 if next > now {
5090 atomic.Store(&sched.sysmonwait, 1)
5091 unlock(&sched.lock)
5092
5093
5094 sleep := forcegcperiod / 2
5095 if next-now < sleep {
5096 sleep = next - now
5097 }
5098 shouldRelax := sleep >= osRelaxMinNS
5099 if shouldRelax {
5100 osRelax(true)
5101 }
5102 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5103 if shouldRelax {
5104 osRelax(false)
5105 }
5106 lock(&sched.lock)
5107 atomic.Store(&sched.sysmonwait, 0)
5108 noteclear(&sched.sysmonnote)
5109 }
5110 if syscallWake {
5111 idle = 0
5112 delay = 20
5113 }
5114 }
5115 unlock(&sched.lock)
5116 }
5117
5118 lock(&sched.sysmonlock)
5119
5120
5121 now = nanotime()
5122
5123
5124 if *cgo_yield != nil {
5125 asmcgocall(*cgo_yield, nil)
5126 }
5127
5128 lastpoll := int64(atomic.Load64(&sched.lastpoll))
5129 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5130 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
5131 list := netpoll(0)
5132 if !list.empty() {
5133
5134
5135
5136
5137
5138
5139
5140 incidlelocked(-1)
5141 injectglist(&list)
5142 incidlelocked(1)
5143 }
5144 }
5145 if GOOS == "netbsd" && needSysmonWorkaround {
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161 if next, _ := timeSleepUntil(); next < now {
5162 startm(nil, false)
5163 }
5164 }
5165 if atomic.Load(&scavenge.sysmonWake) != 0 {
5166
5167 wakeScavenger()
5168 }
5169
5170
5171 if retake(now) != 0 {
5172 idle = 0
5173 } else {
5174 idle++
5175 }
5176
5177 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
5178 lock(&forcegc.lock)
5179 forcegc.idle = 0
5180 var list gList
5181 list.push(forcegc.g)
5182 injectglist(&list)
5183 unlock(&forcegc.lock)
5184 }
5185 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5186 lasttrace = now
5187 schedtrace(debug.scheddetail > 0)
5188 }
5189 unlock(&sched.sysmonlock)
5190 }
5191 }
5192
5193 type sysmontick struct {
5194 schedtick uint32
5195 schedwhen int64
5196 syscalltick uint32
5197 syscallwhen int64
5198 }
5199
5200
5201
5202 const forcePreemptNS = 10 * 1000 * 1000
5203
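// Annotation: retake is called by sysmon. It preempts Ps whose goroutine has
// been running for more than forcePreemptNS and hands off Ps that have been
// blocked in a syscall for at least one sysmon tick (sooner when there is
// other work to do). It returns the number of Ps taken back.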
5204 func retake(now int64) uint32 {
5205 n := 0
5206
5207
5208 lock(&allpLock)
5209
5210
5211
5212 for i := 0; i < len(allp); i++ {
5213 _p_ := allp[i]
5214 if _p_ == nil {
5215
5216
5217 continue
5218 }
5219 pd := &_p_.sysmontick
5220 s := _p_.status
5221 sysretake := false
5222 if s == _Prunning || s == _Psyscall {
5223
5224 t := int64(_p_.schedtick)
5225 if int64(pd.schedtick) != t {
5226 pd.schedtick = uint32(t)
5227 pd.schedwhen = now
5228 } else if pd.schedwhen+forcePreemptNS <= now {
5229 preemptone(_p_)
5230
5231
5232 sysretake = true
5233 }
5234 }
5235 if s == _Psyscall {
5236
5237 t := int64(_p_.syscalltick)
5238 if !sysretake && int64(pd.syscalltick) != t {
5239 pd.syscalltick = uint32(t)
5240 pd.syscallwhen = now
5241 continue
5242 }
5243
5244
5245
5246 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
5247 continue
5248 }
5249
5250 unlock(&allpLock)
5251
5252
5253
5254
5255 incidlelocked(-1)
5256 if atomic.Cas(&_p_.status, s, _Pidle) {
5257 if trace.enabled {
5258 traceGoSysBlock(_p_)
5259 traceProcStop(_p_)
5260 }
5261 n++
5262 _p_.syscalltick++
5263 handoffp(_p_)
5264 }
5265 incidlelocked(1)
5266 lock(&allpLock)
5267 }
5268 }
5269 unlock(&allpLock)
5270 return uint32(n)
5271 }
5272
5273
5274
5275
5276
5277
5278 func preemptall() bool {
5279 res := false
5280 for _, _p_ := range allp {
5281 if _p_.status != _Prunning {
5282 continue
5283 }
5284 if preemptone(_p_) {
5285 res = true
5286 }
5287 }
5288 return res
5289 }
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
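// Annotation: preemptone requests preemption of the goroutine currently
// running on _p_. It sets the goroutine's preempt flag, poisons stackguard0
// so the next stack check drops into the scheduler, and, if asynchronous
// preemption is supported, signals the M directly. The request is
// best-effort: it can be missed or observe the wrong goroutine, which its
// callers tolerate.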
5301 func preemptone(_p_ *p) bool {
5302 mp := _p_.m.ptr()
5303 if mp == nil || mp == getg().m {
5304 return false
5305 }
5306 gp := mp.curg
5307 if gp == nil || gp == mp.g0 {
5308 return false
5309 }
5310
5311 gp.preempt = true
5312
5313
5314
5315
5316
5317 gp.stackguard0 = stackPreempt
5318
5319
5320 if preemptMSupported && debug.asyncpreemptoff == 0 {
5321 _p_.preempt = true
5322 preemptM(mp)
5323 }
5324
5325 return true
5326 }
5327
5328 var starttime int64
5329
5330 func schedtrace(detailed bool) {
5331 now := nanotime()
5332 if starttime == 0 {
5333 starttime = now
5334 }
5335
5336 lock(&sched.lock)
5337 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5338 if detailed {
5339 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
5340 }
5341
5342
5343
5344 for i, _p_ := range allp {
5345 mp := _p_.m.ptr()
5346 h := atomic.Load(&_p_.runqhead)
5347 t := atomic.Load(&_p_.runqtail)
5348 if detailed {
5349 id := int64(-1)
5350 if mp != nil {
5351 id = mp.id
5352 }
5353 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
5354 } else {
5355
5356
5357 print(" ")
5358 if i == 0 {
5359 print("[")
5360 }
5361 print(t - h)
5362 if i == len(allp)-1 {
5363 print("]\n")
5364 }
5365 }
5366 }
5367
5368 if !detailed {
5369 unlock(&sched.lock)
5370 return
5371 }
5372
5373 for mp := allm; mp != nil; mp = mp.alllink {
5374 _p_ := mp.p.ptr()
5375 gp := mp.curg
5376 lockedg := mp.lockedg.ptr()
5377 id1 := int32(-1)
5378 if _p_ != nil {
5379 id1 = _p_.id
5380 }
5381 id2 := int64(-1)
5382 if gp != nil {
5383 id2 = gp.goid
5384 }
5385 id3 := int64(-1)
5386 if lockedg != nil {
5387 id3 = lockedg.goid
5388 }
5389 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
5390 }
5391
5392 forEachG(func(gp *g) {
5393 mp := gp.m
5394 lockedm := gp.lockedm.ptr()
5395 id1 := int64(-1)
5396 if mp != nil {
5397 id1 = mp.id
5398 }
5399 id2 := int64(-1)
5400 if lockedm != nil {
5401 id2 = lockedm.id
5402 }
5403 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
5404 })
5405 unlock(&sched.lock)
5406 }
5407
5408
5409
5410
5411
5412
5413 func schedEnableUser(enable bool) {
5414 lock(&sched.lock)
5415 if sched.disable.user == !enable {
5416 unlock(&sched.lock)
5417 return
5418 }
5419 sched.disable.user = !enable
5420 if enable {
5421 n := sched.disable.n
5422 sched.disable.n = 0
5423 globrunqputbatch(&sched.disable.runnable, n)
5424 unlock(&sched.lock)
5425 for ; n != 0 && sched.npidle != 0; n-- {
5426 startm(nil, false)
5427 }
5428 } else {
5429 unlock(&sched.lock)
5430 }
5431 }
5432
5433
5434
5435
5436
5437 func schedEnabled(gp *g) bool {
5438 assertLockHeld(&sched.lock)
5439
5440 if sched.disable.user {
5441 return isSystemGoroutine(gp, true)
5442 }
5443 return true
5444 }
5445
5446
5447
5448
5449
5450 func mput(mp *m) {
5451 assertLockHeld(&sched.lock)
5452
5453 mp.schedlink = sched.midle
5454 sched.midle.set(mp)
5455 sched.nmidle++
5456 checkdead()
5457 }
5458
5459
5460
5461
5462
5463 func mget() *m {
5464 assertLockHeld(&sched.lock)
5465
5466 mp := sched.midle.ptr()
5467 if mp != nil {
5468 sched.midle = mp.schedlink
5469 sched.nmidle--
5470 }
5471 return mp
5472 }
5473
5474
5475
5476
5477
5478 func globrunqput(gp *g) {
5479 assertLockHeld(&sched.lock)
5480
5481 sched.runq.pushBack(gp)
5482 sched.runqsize++
5483 }
5484
5485
5486
5487
5488
5489 func globrunqputhead(gp *g) {
5490 assertLockHeld(&sched.lock)
5491
5492 sched.runq.push(gp)
5493 sched.runqsize++
5494 }
5495
5496
5497
5498
5499
5500
5501 func globrunqputbatch(batch *gQueue, n int32) {
5502 assertLockHeld(&sched.lock)
5503
5504 sched.runq.pushBackAll(*batch)
5505 sched.runqsize += n
5506 *batch = gQueue{}
5507 }
5508
5509
5510
5511 func globrunqget(_p_ *p, max int32) *g {
5512 assertLockHeld(&sched.lock)
5513
5514 if sched.runqsize == 0 {
5515 return nil
5516 }
5517
5518 n := sched.runqsize/gomaxprocs + 1
5519 if n > sched.runqsize {
5520 n = sched.runqsize
5521 }
5522 if max > 0 && n > max {
5523 n = max
5524 }
5525 if n > int32(len(_p_.runq))/2 {
5526 n = int32(len(_p_.runq)) / 2
5527 }
5528
5529 sched.runqsize -= n
5530
5531 gp := sched.runq.pop()
5532 n--
5533 for ; n > 0; n-- {
5534 gp1 := sched.runq.pop()
5535 runqput(_p_, gp1, false)
5536 }
5537 return gp
5538 }
5539
5540
5541 type pMask []uint32
5542
5543
5544 func (p pMask) read(id uint32) bool {
5545 word := id / 32
5546 mask := uint32(1) << (id % 32)
5547 return (atomic.Load(&p[word]) & mask) != 0
5548 }
5549
5550
5551 func (p pMask) set(id int32) {
5552 word := id / 32
5553 mask := uint32(1) << (id % 32)
5554 atomic.Or(&p[word], mask)
5555 }
5556
5557
5558 func (p pMask) clear(id int32) {
5559 word := id / 32
5560 mask := uint32(1) << (id % 32)
5561 atomic.And(&p[word], ^mask)
5562 }
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589 func updateTimerPMask(pp *p) {
5590 if atomic.Load(&pp.numTimers) > 0 {
5591 return
5592 }
5593
5594
5595
5596
5597 lock(&pp.timersLock)
5598 if atomic.Load(&pp.numTimers) == 0 {
5599 timerpMask.clear(pp.id)
5600 }
5601 unlock(&pp.timersLock)
5602 }
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613 func pidleput(_p_ *p) {
5614 assertLockHeld(&sched.lock)
5615
5616 if !runqempty(_p_) {
5617 throw("pidleput: P has non-empty run queue")
5618 }
5619 updateTimerPMask(_p_)
5620 idlepMask.set(_p_.id)
5621 _p_.link = sched.pidle
5622 sched.pidle.set(_p_)
5623 atomic.Xadd(&sched.npidle, 1)
5624 }
5625
5626
5627
5628
5629
5630
5631
5632 func pidleget() *p {
5633 assertLockHeld(&sched.lock)
5634
5635 _p_ := sched.pidle.ptr()
5636 if _p_ != nil {
5637
5638 timerpMask.set(_p_.id)
5639 idlepMask.clear(_p_.id)
5640 sched.pidle = _p_.link
5641 atomic.Xadd(&sched.npidle, -1)
5642 }
5643 return _p_
5644 }
5645
5646
5647
5648 func runqempty(_p_ *p) bool {
5649
5650
5651
5652
5653 for {
5654 head := atomic.Load(&_p_.runqhead)
5655 tail := atomic.Load(&_p_.runqtail)
5656 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5657 if tail == atomic.Load(&_p_.runqtail) {
5658 return head == tail && runnext == 0
5659 }
5660 }
5661 }
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672 const randomizeScheduler = raceenabled
5673
5674
5675
5676
5677
5678
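// Annotation: runqput tries to put g on the local runnable queue of _p_. If
// next is true, g goes into the runnext slot, displacing whatever was there
// onto the regular queue. If the local queue is full, half of it plus g are
// moved to the global queue by runqputslow. Executed only by the owner P.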
5679 func runqput(_p_ *p, gp *g, next bool) {
5680 if randomizeScheduler && next && fastrandn(2) == 0 {
5681 next = false
5682 }
5683
5684 if next {
5685 retryNext:
5686 oldnext := _p_.runnext
5687 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5688 goto retryNext
5689 }
5690 if oldnext == 0 {
5691 return
5692 }
5693
5694 gp = oldnext.ptr()
5695 }
5696
5697 retry:
5698 h := atomic.LoadAcq(&_p_.runqhead)
5699 t := _p_.runqtail
5700 if t-h < uint32(len(_p_.runq)) {
5701 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5702 atomic.StoreRel(&_p_.runqtail, t+1)
5703 return
5704 }
5705 if runqputslow(_p_, gp, h, t) {
5706 return
5707 }
5708
5709 goto retry
5710 }
5711
5712
5713
5714 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5715 var batch [len(_p_.runq)/2 + 1]*g
5716
5717
5718 n := t - h
5719 n = n / 2
5720 if n != uint32(len(_p_.runq)/2) {
5721 throw("runqputslow: queue is not full")
5722 }
5723 for i := uint32(0); i < n; i++ {
5724 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5725 }
5726 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
5727 return false
5728 }
5729 batch[n] = gp
5730
5731 if randomizeScheduler {
5732 for i := uint32(1); i <= n; i++ {
5733 j := fastrandn(i + 1)
5734 batch[i], batch[j] = batch[j], batch[i]
5735 }
5736 }
5737
5738
5739 for i := uint32(0); i < n; i++ {
5740 batch[i].schedlink.set(batch[i+1])
5741 }
5742 var q gQueue
5743 q.head.set(batch[0])
5744 q.tail.set(batch[n])
5745
5746
5747 lock(&sched.lock)
5748 globrunqputbatch(&q, int32(n+1))
5749 unlock(&sched.lock)
5750 return true
5751 }
5752
5753
5754
5755
5756
5757 func runqputbatch(pp *p, q *gQueue, qsize int) {
5758 h := atomic.LoadAcq(&pp.runqhead)
5759 t := pp.runqtail
5760 n := uint32(0)
5761 for !q.empty() && t-h < uint32(len(pp.runq)) {
5762 gp := q.pop()
5763 pp.runq[t%uint32(len(pp.runq))].set(gp)
5764 t++
5765 n++
5766 }
5767 qsize -= int(n)
5768
5769 if randomizeScheduler {
5770 off := func(o uint32) uint32 {
5771 return (pp.runqtail + o) % uint32(len(pp.runq))
5772 }
5773 for i := uint32(1); i < n; i++ {
5774 j := fastrandn(i + 1)
5775 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
5776 }
5777 }
5778
5779 atomic.StoreRel(&pp.runqtail, t)
5780 if !q.empty() {
5781 lock(&sched.lock)
5782 globrunqputbatch(q, int32(qsize))
5783 unlock(&sched.lock)
5784 }
5785 }
5786
5787
5788
5789
5790
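// Annotation: runqget gets a g from the local runnable queue of _p_,
// preferring the runnext slot. inheritTime reports that the returned g came
// from runnext and should inherit the remainder of the current time slice.
// Executed only by the owner P.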
5791 func runqget(_p_ *p) (gp *g, inheritTime bool) {
5792
5793 next := _p_.runnext
5794
5795
5796
5797 if next != 0 && _p_.runnext.cas(next, 0) {
5798 return next.ptr(), true
5799 }
5800
5801 for {
5802 h := atomic.LoadAcq(&_p_.runqhead)
5803 t := _p_.runqtail
5804 if t == h {
5805 return nil, false
5806 }
5807 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
5808 if atomic.CasRel(&_p_.runqhead, h, h+1) {
5809 return gp, false
5810 }
5811 }
5812 }
5813
5814
5815
5816 func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
5817 oldNext := _p_.runnext
5818 if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
5819 drainQ.pushBack(oldNext.ptr())
5820 n++
5821 }
5822
5823 retry:
5824 h := atomic.LoadAcq(&_p_.runqhead)
5825 t := _p_.runqtail
5826 qn := t - h
5827 if qn == 0 {
5828 return
5829 }
5830 if qn > uint32(len(_p_.runq)) {
5831 goto retry
5832 }
5833
5834 if !atomic.CasRel(&_p_.runqhead, h, h+qn) {
5835 goto retry
5836 }
5837
5838
5839
5840
5841
5842
5843
5844
5845 for i := uint32(0); i < qn; i++ {
5846 gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5847 drainQ.pushBack(gp)
5848 n++
5849 }
5850 return
5851 }
5852
5853
5854
5855
5856
5857 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
5858 for {
5859 h := atomic.LoadAcq(&_p_.runqhead)
5860 t := atomic.LoadAcq(&_p_.runqtail)
5861 n := t - h
5862 n = n - n/2
5863 if n == 0 {
5864 if stealRunNextG {
5865
5866 if next := _p_.runnext; next != 0 {
5867 if _p_.status == _Prunning {
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878 if GOOS != "windows" {
5879 usleep(3)
5880 } else {
5881
5882
5883
5884 osyield()
5885 }
5886 }
5887 if !_p_.runnext.cas(next, 0) {
5888 continue
5889 }
5890 batch[batchHead%uint32(len(batch))] = next
5891 return 1
5892 }
5893 }
5894 return 0
5895 }
5896 if n > uint32(len(_p_.runq)/2) {
5897 continue
5898 }
5899 for i := uint32(0); i < n; i++ {
5900 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
5901 batch[(batchHead+i)%uint32(len(batch))] = g
5902 }
5903 if atomic.CasRel(&_p_.runqhead, h, h+n) {
5904 return n
5905 }
5906 }
5907 }
5908
5909
5910
5911
5912 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
5913 t := _p_.runqtail
5914 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
5915 if n == 0 {
5916 return nil
5917 }
5918 n--
5919 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
5920 if n == 0 {
5921 return gp
5922 }
5923 h := atomic.LoadAcq(&_p_.runqhead)
5924 if t-h+n >= uint32(len(_p_.runq)) {
5925 throw("runqsteal: runq overflow")
5926 }
5927 atomic.StoreRel(&_p_.runqtail, t+n)
5928 return gp
5929 }
5930
5931
5932
5933 type gQueue struct {
5934 head guintptr
5935 tail guintptr
5936 }
5937
5938
5939 func (q *gQueue) empty() bool {
5940 return q.head == 0
5941 }
5942
5943
5944 func (q *gQueue) push(gp *g) {
5945 gp.schedlink = q.head
5946 q.head.set(gp)
5947 if q.tail == 0 {
5948 q.tail.set(gp)
5949 }
5950 }
5951
5952
5953 func (q *gQueue) pushBack(gp *g) {
5954 gp.schedlink = 0
5955 if q.tail != 0 {
5956 q.tail.ptr().schedlink.set(gp)
5957 } else {
5958 q.head.set(gp)
5959 }
5960 q.tail.set(gp)
5961 }
5962
5963
5964
5965 func (q *gQueue) pushBackAll(q2 gQueue) {
5966 if q2.tail == 0 {
5967 return
5968 }
5969 q2.tail.ptr().schedlink = 0
5970 if q.tail != 0 {
5971 q.tail.ptr().schedlink = q2.head
5972 } else {
5973 q.head = q2.head
5974 }
5975 q.tail = q2.tail
5976 }
5977
5978
5979
5980 func (q *gQueue) pop() *g {
5981 gp := q.head.ptr()
5982 if gp != nil {
5983 q.head = gp.schedlink
5984 if q.head == 0 {
5985 q.tail = 0
5986 }
5987 }
5988 return gp
5989 }
5990
5991
5992 func (q *gQueue) popList() gList {
5993 stack := gList{q.head}
5994 *q = gQueue{}
5995 return stack
5996 }
5997
5998
5999
6000 type gList struct {
6001 head guintptr
6002 }
6003
6004
6005 func (l *gList) empty() bool {
6006 return l.head == 0
6007 }
6008
6009
6010 func (l *gList) push(gp *g) {
6011 gp.schedlink = l.head
6012 l.head.set(gp)
6013 }
6014
6015
6016 func (l *gList) pushAll(q gQueue) {
6017 if !q.empty() {
6018 q.tail.ptr().schedlink = l.head
6019 l.head = q.head
6020 }
6021 }
6022
6023
6024 func (l *gList) pop() *g {
6025 gp := l.head.ptr()
6026 if gp != nil {
6027 l.head = gp.schedlink
6028 }
6029 return gp
6030 }
6031
6032
6033 func setMaxThreads(in int) (out int) {
6034 lock(&sched.lock)
6035 out = int(sched.maxmcount)
6036 if in > 0x7fffffff {
6037 sched.maxmcount = 0x7fffffff
6038 } else {
6039 sched.maxmcount = int32(in)
6040 }
6041 checkmcount()
6042 unlock(&sched.lock)
6043 return
6044 }
6045
6046
6047 func procPin() int {
6048 _g_ := getg()
6049 mp := _g_.m
6050
6051 mp.locks++
6052 return int(mp.p.ptr().id)
6053 }
6054
6055
6056 func procUnpin() {
6057 _g_ := getg()
6058 _g_.m.locks--
6059 }
6060
6061
6062
6063 func sync_runtime_procPin() int {
6064 return procPin()
6065 }
6066
6067
6068
6069 func sync_runtime_procUnpin() {
6070 procUnpin()
6071 }
6072
6073
6074
6075 func sync_atomic_runtime_procPin() int {
6076 return procPin()
6077 }
6078
6079
6080
6081 func sync_atomic_runtime_procUnpin() {
6082 procUnpin()
6083 }
6084
6085
6086
6087
6088 func sync_runtime_canSpin(i int) bool {
6089
6090
6091
6092
6093
6094 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
6095 return false
6096 }
6097 if p := getg().m.p.ptr(); !runqempty(p) {
6098 return false
6099 }
6100 return true
6101 }
6102
6103
6104
6105 func sync_runtime_doSpin() {
6106 procyield(active_spin_cnt)
6107 }
6108
6109 var stealOrder randomOrder
6110
6111
6112
6113
6114
6115 type randomOrder struct {
6116 count uint32
6117 coprimes []uint32
6118 }
6119
6120 type randomEnum struct {
6121 i uint32
6122 count uint32
6123 pos uint32
6124 inc uint32
6125 }
6126
6127 func (ord *randomOrder) reset(count uint32) {
6128 ord.count = count
6129 ord.coprimes = ord.coprimes[:0]
6130 for i := uint32(1); i <= count; i++ {
6131 if gcd(i, count) == 1 {
6132 ord.coprimes = append(ord.coprimes, i)
6133 }
6134 }
6135 }
6136
6137 func (ord *randomOrder) start(i uint32) randomEnum {
6138 return randomEnum{
6139 count: ord.count,
6140 pos: i % ord.count,
6141 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
6142 }
6143 }
6144
6145 func (enum *randomEnum) done() bool {
6146 return enum.i == enum.count
6147 }
6148
6149 func (enum *randomEnum) next() {
6150 enum.i++
6151 enum.pos = (enum.pos + enum.inc) % enum.count
6152 }
6153
6154 func (enum *randomEnum) position() uint32 {
6155 return enum.pos
6156 }
6157
6158 func gcd(a, b uint32) uint32 {
6159 for b != 0 {
6160 a, b = b, a%b
6161 }
6162 return a
6163 }
6164
6165
6166
6167 type initTask struct {
6168
6169 state uintptr
6170 ndeps uintptr
6171 nfns uintptr
6172
6173
6174 }
6175
6176
6177
6178 var inittrace tracestat
6179
6180 type tracestat struct {
6181 active bool
6182 id int64
6183 allocs uint64
6184 bytes uint64
6185 }
6186
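// Annotation: doInit runs the package initialization task t. It first
// recursively runs the init tasks t depends on, then calls t's init functions
// in order, using t.state to detect and reject recursive initialization. When
// init tracing is enabled (GODEBUG=inittrace=1), it prints timing and
// allocation statistics for each package's init.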
6187 func doInit(t *initTask) {
6188 switch t.state {
6189 case 2:
6190 return
6191 case 1:
6192 throw("recursive call during initialization - linker skew")
6193 default:
6194 t.state = 1
6195
6196 for i := uintptr(0); i < t.ndeps; i++ {
6197 p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
6198 t2 := *(**initTask)(p)
6199 doInit(t2)
6200 }
6201
6202 if t.nfns == 0 {
6203 t.state = 2
6204 return
6205 }
6206
6207 var (
6208 start int64
6209 before tracestat
6210 )
6211
6212 if inittrace.active {
6213 start = nanotime()
6214
6215 before = inittrace
6216 }
6217
6218 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
6219 for i := uintptr(0); i < t.nfns; i++ {
6220 p := add(firstFunc, i*goarch.PtrSize)
6221 f := *(*func())(unsafe.Pointer(&p))
6222 f()
6223 }
6224
6225 if inittrace.active {
6226 end := nanotime()
6227
6228 after := inittrace
6229
6230 f := *(*func())(unsafe.Pointer(&firstFunc))
6231 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
6232
6233 var sbuf [24]byte
6234 print("init ", pkg, " @")
6235 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6236 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6237 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6238 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6239 print("\n")
6240 }
6241
6242 t.state = 2
6243 }
6244 }
6245