Source file: src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, name string, stack]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)

	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128

	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1

	// Maximum number of bytes needed to encode uint64 in base-128.
	traceBytesPerNumber = 10

	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6

	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
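
// Worked example for the constants above (derived from the expressions, not
// part of the upstream comments): traceTickDiv is 16+48 = 64 on 386/amd64,
// where goarch.Is386 or goarch.IsAmd64 is 1, and 16 on every other GOARCH.
// On a 3 GHz x86 machine one cputick is ~0.33ns, so one stored timestamp
// unit is roughly 64*0.33ns ≈ 21ns, which keeps timestamp deltas small and
// cheap to varint-encode.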

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.pos
}
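
// Sizing note: arr is declared so that unsafe.Sizeof(traceBuf{}) is exactly
// 64<<10 bytes (64 KiB); the header plus the byte array fill one 64 KiB
// block, which is what traceFlush below allocates with sysAlloc.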

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector.
//
// traceBufs are allocated from non-GC'd memory (sysAlloc), so keeping
// them as uintptrs hides them from the GC and lets the runtime store
// them without write barriers.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit an event, and we want no events until all the preliminary
	// GoCreate/GoWaiting/GoInSyscall events below have been written. Otherwise
	// the trace would be inconsistent (e.g. a GoSysExit without a matching
	// GoInSyscall). To instruct traceEvent that it must not ignore events
	// below, we set startingtrace; trace.enabled is set afterwards, once we
	// have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	// World is stopped, so forEachGRace iterates over a stable set of Gs.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	})
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before the traceEvGoInSyscall
	// timestamp. That would lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// Reset the string dictionary:
	//  id 0 is reserved for the empty string,
	//  remaining ids are assigned by traceString.
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
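
// A minimal usage sketch (hypothetical caller code, shown for orientation;
// the supported entry point is the runtime/trace package, whose Start and
// Stop drive StartTrace, ReadTrace, and StopTrace on the caller's behalf):
//
//	f, err := os.Create("trace.out")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := trace.Start(f); err != nil {
//		log.Fatal(err)
//	}
//	defer trace.Stop()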

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush all pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while this function is holding trace.lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		if freq <= 0 {
			throw("trace: ReadTrace got invalid frequency")
		}
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers; we will pick them up
		// on the following ReadTrace calls.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
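
// A sketch of the reader loop this function expects (hypothetical; this is
// essentially what runtime/trace.Start runs in its service goroutine, with
// w being the io.Writer passed to trace.Start):
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // tracing stopped and fully drained
//		}
//		w.Write(data) // data must be consumed before the next call
//	}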

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ in traceAcquireBuffer.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	// NOTE: ticks might be the same after tick division, although the real
	// cputicks grow monotonically.
	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if tickDiff == 0 {
		ticks = buf.lastTicks + 1
		tickDiff = 1
	}

	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
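
// Encoded event layout, as produced by traceEventLocked above:
//
//	byte 0:            ev | narg<<traceArgCountShift (event type in the low
//	                   6 bits, capped argument count in the top 2 bits)
//	byte 1 (optional): payload length in bytes, present only when narg == 3
//	then:              varint timestamp diff, followed by up to narg varint
//	                   arguments (the last being the stack id when skip >= 0)
//
// For example, an event of type 20 (traceEvGoBlock) with one stack argument
// and a tick diff of 5 occupies three bytes: 0x54 (20 | 1<<6), 0x05, and the
// varint-encoded stack id.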

// traceStackID captures a stack trace into buf, registers it in
// trace.stackTab, and returns its unique id.
func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// Initialize the buffer for a new batch.
	ticks := uint64(cputicks()) / traceTickDiv
	if ticks == bufp.lastTicks {
		ticks = bufp.lastTicks + 1
	}
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// Memory allocation above may trigger tracing and
	// cause *bufp changes. The following code now works with *bufp,
	// so there must be no memory allocation or any activity
	// that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// Double-check that the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
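
// Worked example: traceAppend(nil, 300) produces [0xAC, 0x02].
// 300 is 0b1_0010_1100; the low 7 bits (0b010_1100 = 0x2C) are emitted first
// with the 0x80 continuation bit set (0xAC), then the remaining bits
// (300>>7 = 2) are emitted as the final byte without the continuation bit.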

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}
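
// Example of the rounding above: on a 64-bit platform (goarch.PtrSize == 8),
// alloc(13) rounds n up to 16 via alignUp, so successive allocations stay
// pointer-aligned within the block's data array.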

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep, so we don't need to emit events for empty sweeps.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
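
// Note on the masking above: traceFutileWakeup is 128, while event types are
// at most 63 (they must fit in 6 bits), so the flag can be ORed into the
// traceEv byte by callers without colliding with any event type. A
// hypothetical call traceGoPark(traceEvGoBlockRecv|traceFutileWakeup, 2)
// would emit a FutileWakeup event followed by a plain GoBlockRecv event.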

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
}

func traceHeapGoal() {
	if heapGoal := atomic.Load64(&gcController.heapGoal); heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// Double-check that the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}

// startPCforTrace returns the start PC of a goroutine for tracing purposes.
// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise
// it returns pc.
func startPCforTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // may happen for locked g in extra M since its pc is 0.
	}
	w := funcdata(f, _FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
	return f.datap.textAddr(*(*uint32)(w))
}