Source file src/runtime/mwbbuf.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This implements the write barrier buffer. The write barrier itself
// is gcWriteBarrier and is implemented in assembly.
//
// See mbarrier.go for algorithmic details on the write barrier. This
// file deals only with the buffer.
//
// The write barrier has a fast path and a slow path. The fast path
// simply enqueues to a per-P write barrier buffer. It's written in
// assembly and doesn't clobber any general purpose registers, so it
// doesn't have the usual overheads of a Go call.
//
// When the buffer fills up, the write barrier invokes the slow path
// (wbBufFlush) to flush the buffer to the GC work queues. In this
// path, since the compiler didn't spill registers, we spill *all*
// registers and disallow any GC safe points that could observe the
// stack frame (since we don't know the types of the spilled
// registers).

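// As a rough sketch in Go (illustrative only; the real fast path is
// the gcWriteBarrier assembly, and slot and new are hypothetical
// names), a pointer write *slot = new with the barrier enabled
// behaves approximately like:
//
//	if writeBarrier.enabled {
//		buf := &getg().m.p.ptr().wbBuf
//		if !buf.putFast(*slot, new) {
//			wbBufFlush(slot, new)
//		}
//	}
//	*slot = new
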
package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

// testSmallBuf forces a small write barrier buffer to stress write
// barrier flushing.
const testSmallBuf = false

// wbBuf is a per-P buffer of pointers queued by the write barrier.
// This buffer is flushed to the GC workbufs when it fills up and on
// various GC transitions.
//
// This is closely related to a "sequential store buffer" (SSB),
// except that SSBs are usually used for maintaining remembered sets,
// while this is used for marking.
type wbBuf struct {
	// next points to the next slot in buf. It must not be a
	// pointer type because it can point past the end of buf and
	// must be updated without write barriers.
	//
	// This is a pointer rather than an index to optimize the
	// write barrier assembly.
	next uintptr

	// end points to just past the end of buf. It must not be a
	// pointer type because it points past the end of buf and must
	// be updated without write barriers.
	end uintptr

	// buf stores a series of pointers to execute write barriers
	// on. This must be a multiple of wbBufEntryPointers because
	// the write barrier only checks for overflow once per entry.
	buf [wbBufEntryPointers * wbBufEntries]uintptr
}

const (
	// wbBufEntries is the number of write barriers between
	// flushes of the write barrier buffer.
	//
	// This trades latency for throughput amortization. Higher
	// values amortize flushing overhead more, but increase the
	// latency of flushing. Higher values also increase the cache
	// footprint of the buffer.
	//
	// TODO: What is the latency cost of this? Tune this value.
	wbBufEntries = 256

	// wbBufEntryPointers is the number of pointers added to the
	// buffer by each write barrier.
	wbBufEntryPointers = 2
)
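
// To make the sizes above concrete: each P's buffer holds
// wbBufEntries = 256 old/new pairs, i.e. 256 * 2 = 512 uintptr slots.
// On a 64-bit platform that is 512 * 8 = 4096 bytes of buffer per P
// between flushes.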

// reset empties b by resetting its next and end pointers.
func (b *wbBuf) reset() {
	start := uintptr(unsafe.Pointer(&b.buf[0]))
	b.next = start
	if writeBarrier.cgo {
		// Effectively disable the buffer by forcing a flush
		// on every barrier.
		b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))
	} else if testSmallBuf {
		// For testing, allow two barriers in the buffer. If
		// we only did one, then barriers of non-heap pointers
		// would be no-ops. This lets us combine a buffered
		// barrier with a flush at a later time.
		b.end = uintptr(unsafe.Pointer(&b.buf[2*wbBufEntryPointers]))
	} else {
		b.end = start + uintptr(len(b.buf))*unsafe.Sizeof(b.buf[0])
	}

	if (b.end-b.next)%(wbBufEntryPointers*unsafe.Sizeof(b.buf[0])) != 0 {
		throw("bad write barrier buffer bounds")
	}
}
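
// To see why the cgo case above effectively disables buffering: with
// end set to &buf[wbBufEntryPointers], there is room for exactly one
// entry, so the first putFast advances next to end and returns false,
// forcing the slow path (and hence the cgo pointer check) on every
// barrier.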

// discard resets b's next pointer, but not its end pointer.
//
// This must be nosplit because it's called by wbBufFlush.
//
//go:nosplit
func (b *wbBuf) discard() {
	b.next = uintptr(unsafe.Pointer(&b.buf[0]))
}

// empty reports whether b contains no pointers.
func (b *wbBuf) empty() bool {
	return b.next == uintptr(unsafe.Pointer(&b.buf[0]))
}

// putFast adds old and new to the write barrier buffer and returns
// false if a flush is necessary. Callers should use this as:
//
//	buf := &getg().m.p.ptr().wbBuf
//	if !buf.putFast(old, new) {
//		wbBufFlush(...)
//	}
//	... actual memory write ...
//
// The arguments to wbBufFlush depend on whether the caller is doing
// its own cgo pointer checks. If it is, then this can be
// wbBufFlush(nil, 0). Otherwise, it must pass the slot address and
// new.
//
// The caller must ensure there are no preemption points during the
// above sequence. There must be no preemption points while buf is in
// use because it is a per-P resource. There must be no preemption
// points between the buffer put and the write to memory because this
// could allow a GC phase change, which could result in missed write
// barriers.
//
// putFast must be nowritebarrierrec because write barriers here would
// corrupt the write barrier buffer. It (and everything it calls, if
// it called anything) has to be nosplit to avoid scheduling onto a
// different P and a different buffer.
//
//go:nowritebarrierrec
//go:nosplit
func (b *wbBuf) putFast(old, new uintptr) bool {
	p := (*[2]uintptr)(unsafe.Pointer(b.next))
	p[0] = old
	p[1] = new
	b.next += 2 * goarch.PtrSize
	return b.next != b.end
}
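
// As an illustration only (not a real call site; slot, old, and new
// are hypothetical), a hand-written caller could keep the sequence
// above free of preemption points by pinning the M with
// acquirem/releasem, which disables preemption and so keeps the
// goroutine on the same P and buffer:
//
//	mp := acquirem()
//	buf := &mp.p.ptr().wbBuf
//	if !buf.putFast(old, new) {
//		wbBufFlush(slot, new)
//	}
//	*slot = new
//	releasem(mp)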

// wbBufFlush flushes the current P's write barrier buffer to the GC
// workbufs. It is passed the slot and value of the write barrier that
// caused the flush so that it can implement cgocheck.
//
// This must not have write barriers because it is part of the write
// barrier implementation.
//
// This and everything it calls must be nosplit because 1) the stack
// contains untyped slots from gcWriteBarrier and 2) there must not be
// a GC safe point between the write barrier test in the caller and
// flushing the buffer.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nowritebarrierrec
//go:nosplit
func wbBufFlush(dst *uintptr, src uintptr) {
	// Note: Every possible return from this function must reset
	// the buffer's next pointer to prevent buffer overflow.

	// This *must not* modify its arguments because this
	// function's argument slots do double duty in gcWriteBarrier
	// as register spill slots. Currently, not modifying the
	// arguments is sufficient to keep the spill slots unmodified
	// (which seems unlikely to change since it costs little and
	// helps with debugging).

	if getg().m.dying > 0 {
		// We're going down. Not much point in write barriers
		// and this way we can allow write barriers in the
		// panic path.
		getg().m.p.ptr().wbBuf.discard()
		return
	}

	if writeBarrier.cgo && dst != nil {
		// This must be called from the stack that did the
		// write. It's nosplit all the way down.
		cgoCheckWriteBarrier(dst, src)
		if !writeBarrier.needed {
			// We were only called for cgocheck.
			getg().m.p.ptr().wbBuf.discard()
			return
		}
	}

	// Switch to the system stack so we don't have to worry about
	// the untyped stack slots or safe points.
	systemstack(func() {
		wbBufFlush1(getg().m.p.ptr())
	})
}

// wbBufFlush1 flushes p's write barrier buffer to the GC work queue.
//
// This must not have write barriers because it is part of the write
// barrier implementation; write barriers here could cause infinite
// loops or corrupt the buffer.
//
// This must be non-preemptible because it uses the P's workbuf.
//
//go:nowritebarrierrec
//go:systemstack
func wbBufFlush1(_p_ *p) {
	// Get the buffered pointers.
	start := uintptr(unsafe.Pointer(&_p_.wbBuf.buf[0]))
	n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0])
	ptrs := _p_.wbBuf.buf[:n]
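	// For example, after k buffered barriers, next-start is
	// k*wbBufEntryPointers*goarch.PtrSize bytes, so n counts the
	// k*wbBufEntryPointers individual pointers to process.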

	// Poison the buffer to make extra sure nothing is enqueued
	// while we're processing the buffer.
	_p_.wbBuf.next = 0

	if useCheckmark {
		// Slow path for checkmark mode.
		for _, ptr := range ptrs {
			shade(ptr)
		}
		_p_.wbBuf.reset()
		return
	}

	// Mark all of the pointers in the buffer and record only the
	// pointers we greyed. We use the buffer itself to temporarily
	// record greyed pointers.
	//
	// TODO: Should scanobject/scanblock just stuff pointers into
	// the wbBuf? Then this would become the sole greying path.
	//
	// TODO: We could avoid shading any of the "new" pointers in
	// the buffer if the stack has been shaded, or even avoid
	// putting them in the buffer at all (which would double its
	// capacity). This is slightly complicated with the buffer; we
	// could track whether any un-shaded goroutine has used the
	// buffer, or just track globally whether there are any
	// un-shaded stacks and flush after each stack scan.
	gcw := &_p_.gcw
	pos := 0
	for _, ptr := range ptrs {
		if ptr < minLegalPointer {
			// nil pointers are very common, especially
			// for the "old" values. Filter out these and
			// other "obvious" non-heap pointers ASAP.
			//
			// TODO: Should we filter out nils in the fast
			// path to reduce the rate of flushes?
			continue
		}
		obj, span, objIndex := findObject(ptr, 0, 0)
		if obj == 0 {
			continue
		}
		// TODO: Consider making two passes where the first
		// just prefetches the mark bits.
		mbits := span.markBitsForIndex(objIndex)
		if mbits.isMarked() {
			continue
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			continue
		}
		ptrs[pos] = obj
		pos++
	}

	// Enqueue the greyed objects.
	gcw.putBatch(ptrs[:pos])

	_p_.wbBuf.reset()
}
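
// Besides overflow, the buffers are also flushed at GC transitions.
// As a rough sketch (details vary by Go version; see gcMarkDone in
// mgc.go), the mark-completion path flushes every P's buffer on the
// system stack via forEachP before checking whether mark work remains:
//
//	forEachP(func(_p_ *p) {
//		wbBufFlush1(_p_)
//		...
//	})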