Source file src/runtime/mgcpacer.go

// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"internal/goexperiment"
	"runtime/internal/atomic"
	"unsafe"
)

const (
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	gcGoalUtilization = goexperiment.PacerRedesignInt*gcBackgroundUtilization +
		(1-goexperiment.PacerRedesignInt)*(gcBackgroundUtilization+0.05)

	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal.
	//
	// Setting this to < gcGoalUtilization avoids saturating the trigger
	// feedback controller when there are no assists, which allows it to
	// better control CPU and heap growth. However, the larger the gap,
	// the more mutator assists are expected to happen, which impact
	// mutator latency.
	//
	// If goexperiment.PacerRedesign, the trigger feedback controller
	// is replaced with an estimate of the mark/cons ratio that doesn't
	// have the same saturation issues, so this is set equal to
	// gcGoalUtilization.
	gcBackgroundUtilization = 0.25

	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.heapScanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000

	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000

	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10

	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)

	// scannableStackSizeSlack is the bytes of stack space allocated or freed
	// that can accumulate on a P before updating gcController.stackSize.
	scannableStackSizeSlack = 8 << 10
)
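
// Illustrative arithmetic (editorial note, not part of the runtime): the
// goexperiment integer constants above select between two values at
// compile time. With goexperiment.PacerRedesignInt == 1,
//
//	gcGoalUtilization = 1*0.25 + (1-1)*(0.25+0.05) = 0.25
//
// and with goexperiment.PacerRedesignInt == 0,
//
//	gcGoalUtilization = 0*0.25 + (1-0)*(0.25+0.05) = 0.30
//
// The same pattern selects defaultHeapMinimum: 512 KiB when
// goexperiment.HeapMinimum512KiBInt == 1, and 4 MiB otherwise.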

func init() {
	if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
		println(offset)
		throw("gcController.heapLive not aligned to 8 bytes")
	}
}

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the gcController.trigger
// based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
//
// All fields of gcController are used only during a single mark
// cycle.
var gcController gcControllerState

type gcControllerState struct {
	// Initialized from GOGC. GOGC=off means no GC.
	gcPercent atomic.Int32

	_ uint32 // padding so following 64-bit values are 8-byte aligned

	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// debugging.
	heapMinimum uint64

	// triggerRatio is the heap growth ratio that triggers marking.
	//
	// E.g., if this is 0.6, then GC should start when the live
	// heap has reached 1.6 times the heap size marked by the
	// previous cycle. This should be ≤ GOGC/100 so the trigger
	// heap size is less than the goal heap size. This is set
	// during mark termination for the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	//
	// Used if !goexperiment.PacerRedesign.
	triggerRatio float64

	// trigger is the heap size that triggers marking.
	//
	// When heapLive ≥ trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	//
	// This is computed from triggerRatio during mark termination
	// for the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	trigger uint64

	// consMark is the estimated per-CPU consMark ratio for the application.
	//
	// It represents the ratio between the application's allocation
	// rate, as bytes allocated per CPU-time, and the GC's scan rate,
	// as bytes scanned per CPU-time.
	// The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
	//
	// At a high level, this value is computed as the bytes of memory
	// allocated (cons) per unit of scan work completed (mark) in a GC
	// cycle, divided by the CPU time spent on each activity.
	//
	// Updated at the end of each GC cycle, in endCycle.
	//
	// For goexperiment.PacerRedesign.
	consMark float64

	// consMarkController holds the state for the mark-cons ratio
	// estimation over time.
	//
	// Its purpose is to smooth out noisiness in the computation of
	// consMark; see consMark for details.
	//
	// For goexperiment.PacerRedesign.
	consMarkController piController

	_ uint32 // Padding for atomics on 32-bit platforms.

	// heapGoal is the goal heapLive for when next GC ends.
	// Set to ^uint64(0) if disabled.
	//
	// Read and written atomically, unless the world is stopped.
	heapGoal uint64

	// lastHeapGoal is the value of heapGoal for the previous GC.
	// Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64

	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.heapAlloc, since heapAlloc includes
	// unmarked objects that have not yet been swept (and hence goes up as we
	// allocate and down as we sweep) while heapLive excludes these
	// objects (and hence only goes up between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Reads should likewise be atomic (or during STW).
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive uint64

	// heapScan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heapLive), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// For !goexperiment.PacerRedesign: Whenever this is updated,
	// call this gcControllerState's revise() method. It is read
	// and written atomically or with the world stopped.
	//
	// For goexperiment.PacerRedesign: This value is fixed at the
	// start of a GC cycle, so during a GC cycle it is safe to
	// read without atomics, and it represents the maximum scannable
	// heap.
	heapScan uint64

	// lastHeapScan is the number of bytes of heap that were scanned
	// last GC cycle. It is the same as heapMarked, but only
	// includes the "scannable" parts of objects.
	//
	// Updated when the world is stopped.
	lastHeapScan uint64

	// stackScan is a snapshot of scannableStackSize taken at each GC
	// STW pause and is used in pacing decisions.
	//
	// Updated only while the world is stopped.
	stackScan uint64

	// scannableStackSize is the amount of allocated goroutine stack space in
	// use by goroutines.
	//
	// This number tracks allocated goroutine stack space rather than used
	// goroutine stack space (i.e. what is actually scanned) because used
	// goroutine stack space is much harder to measure cheaply. By using
	// allocated space, we make an overestimate; this is OK, it's better
	// to conservatively overcount than undercount.
	//
	// Read and updated atomically.
	scannableStackSize uint64

	// globalsScan is the total amount of global variable space
	// that is scannable.
	//
	// Read and updated atomically.
	globalsScan uint64

	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64

	// heapScanWork is the total heap scan work performed this cycle.
	// stackScanWork is the total stack scan work performed this cycle.
	// globalsScanWork is the total globals scan work performed this cycle.
	//
	// These are updated atomically during the cycle. Updates occur in
	// bounded batches, since they are both written and read
	// throughout the cycle. At the end of the cycle, heapScanWork is how
	// much of the retained heap is scannable.
	//
	// Currently these are measured in bytes. For most uses, this is an
	// opaque unit of work, but for estimation the definition is important.
	//
	// Note that stackScanWork includes all allocated space, not just the
	// size of the stack itself, mirroring stackSize.
	//
	// For !goexperiment.PacerRedesign, stackScanWork and globalsScanWork
	// are always zero.
	heapScanWork    atomic.Int64
	stackScanWork   atomic.Int64
	globalsScanWork atomic.Int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	assistWorkPerByte atomic.Float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte, users may notice a skew between
	// the two values; such a state should be safe.
	assistBytesPerWork atomic.Float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64

	// test indicates that this is a test-only copy of gcControllerState.
	test bool

	_ cpu.CacheLinePad
}

func (c *gcControllerState) init(gcPercent int32) {
	c.heapMinimum = defaultHeapMinimum

	if goexperiment.PacerRedesign {
		c.consMarkController = piController{
			// Tuned first via the Ziegler-Nichols process in simulation,
			// then the integral time was manually tuned against real-world
			// applications to deal with noisiness in the measured cons/mark
			// ratio.
			kp: 0.9,
			ti: 4.0,

			// Set a high reset time in GC cycles.
			// This is inversely proportional to the rate at which we
			// accumulate error from clipping. By making this very high
			// we make the accumulation slow. In general, clipping is
			// OK in our situation, hence the choice.
			//
			// Tune this if we get unintended effects from clipping for
			// a long time.
			tt:  1000,
			min: -1000,
			max: 1000,
		}
	} else {
		// Set a reasonable initial GC trigger.
		c.triggerRatio = 7 / 8.0

		// Fake a heapMarked value so it looks like a trigger at
		// heapMinimum is the appropriate growth from heapMarked.
		// This will go into computing the initial GC goal.
		c.heapMarked = uint64(float64(c.heapMinimum) / (1 + c.triggerRatio))
	}

	// This will also compute and set the GC trigger and goal.
	c.setGCPercent(gcPercent)
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
func (c *gcControllerState) startCycle(markStartTime int64, procs int) {
	c.heapScanWork.Store(0)
	c.stackScanWork.Store(0)
	c.globalsScanWork.Store(0)
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0
	c.markStartTime = markStartTime
	c.stackScan = atomic.Load64(&c.scannableStackSize)

	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed gcController.heapLive
	// over trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by a tiny bit.
	if goexperiment.PacerRedesign {
		if c.heapGoal < c.heapLive+64<<10 {
			c.heapGoal = c.heapLive + 64<<10
		}
	} else {
		if c.heapGoal < c.heapLive+1<<20 {
			c.heapGoal = c.heapLive + 1<<20
		}
	}

	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
	utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			c.dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(procs)
	} else {
		c.fractionalUtilizationGoal = 0
	}

	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		c.dedicatedMarkWorkersNeeded = int64(procs)
		c.fractionalUtilizationGoal = 0
	}

	// Clear per-P state
	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		assistRatio := c.assistWorkPerByte.Load()
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			c.heapGoal>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}
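
// A worked example of the worker computation above (editorial note,
// illustrative numbers only): with procs = 6 and gcBackgroundUtilization
// = 0.25, totalUtilizationGoal is 1.5 and rounding gives 2 dedicated
// workers. The relative error is 2/1.5 - 1 ≈ 0.33 > maxUtilError, so one
// dedicated worker is dropped and the remainder is covered fractionally:
// fractionalUtilizationGoal = (1.5 - 1) / 6 ≈ 0.083. With procs = 4,
// rounding gives exactly 1 dedicated worker (error 0) and no fractional
// workers are needed.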

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or gcController.heapGoal is updated. It is safe to
// call concurrently, but it may race with other calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent.Load()
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}
	live := atomic.Load64(&c.heapLive)
	scan := atomic.Load64(&c.heapScan)
	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(atomic.Load64(&c.heapGoal))

	var scanWorkExpected int64
	if goexperiment.PacerRedesign {
		// The expected scan work is computed as the amount of bytes scanned last
		// GC cycle, plus our estimate of stacks and globals work for this cycle.
		scanWorkExpected = int64(c.lastHeapScan + c.stackScan + c.globalsScan)

		// maxScanWork is a worst-case estimate of the amount of scan work that
		// needs to be performed in this GC cycle. Specifically, it represents
		// the case where *all* scannable memory turns out to be live.
		maxScanWork := int64(scan + c.stackScan + c.globalsScan)
		if work > scanWorkExpected {
			// We've already done more scan work than expected. Because our expectation
			// is based on a steady-state scannable heap size, we assume this means our
			// heap is growing. Compute a new heap goal that takes our existing runway
			// computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
			// scan work. This keeps our assist ratio stable if the heap continues to grow.
			//
			// The effect of this mechanism is that assists stay flat in the face of heap
			// growths. It's OK to use more memory this cycle to scan all the live heap,
			// because the next GC cycle is inevitably going to use *at least* that much
			// memory anyway.
			extHeapGoal := int64(float64(heapGoal-int64(c.trigger))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.trigger)
			scanWorkExpected = maxScanWork

			// hardGoal is a hard limit on the amount that we're willing to push back the
			// heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
			// stacks and/or globals grow to twice their size, this limits the current GC cycle's
			// growth to 4x the original live heap's size).
			//
			// This maintains the invariant that we use no more memory than the next GC cycle
			// will anyway.
			hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
			if extHeapGoal > hardGoal {
				extHeapGoal = hardGoal
			}
			heapGoal = extHeapGoal
		}
		if int64(live) > heapGoal {
			// We're already past our heap goal, even the extrapolated one.
			// Leave ourselves some extra runway, so in the worst case we
			// finish by that point.
			const maxOvershoot = 1.1
			heapGoal = int64(float64(heapGoal) * maxOvershoot)

			// Compute the upper bound on the scan work remaining.
			scanWorkExpected = maxScanWork
		}
	} else {
		// Compute the expected scan work remaining.
		//
		// This is estimated based on the expected
		// steady-state scannable heap. For example, with
		// GOGC=100, only half of the scannable heap is
		// expected to be live, so that's what we target.
		//
		// (This is a float calculation to avoid overflowing on
		// 100*heapScan.)
		scanWorkExpected = int64(float64(scan) * 100 / float64(100+gcPercent))
		if int64(live) > heapGoal || work > scanWorkExpected {
			// We're past the soft goal, or we've already done more scan
			// work than we expected. Pace GC so that in the worst case it
			// will complete by the hard goal.
			const maxOvershoot = 1.1
			heapGoal = int64(float64(heapGoal) * maxOvershoot)

			// Compute the upper bound on the scan work remaining.
			scanWorkExpected = int64(scan)
		}
	}

	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}

	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}

	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
	c.assistWorkPerByte.Store(assistWorkPerByte)
	c.assistBytesPerWork.Store(assistBytesPerWork)
}
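
// A worked example of the assist ratio (editorial note, illustrative
// numbers only): suppose scanWorkExpected is 50 MiB, 10 MiB of scan work
// is already done, the heap goal is 20 MiB away from the current live
// heap, and we're under the soft goal. Then
//
//	scanWorkRemaining = 50 MiB - 10 MiB = 40 MiB
//	assistWorkPerByte = 40 MiB / 20 MiB = 2.0
//
// so each mutator assist must perform (or steal) two bytes of scan work
// per byte it allocates for marking to finish by the goal.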

// endCycle computes the trigger ratio (!goexperiment.PacerRedesign)
// or the consMark estimate (goexperiment.PacerRedesign) for the next cycle.
// Returns the trigger ratio if applicable, or 0 (goexperiment.PacerRedesign).
// userForced indicates whether the current GC cycle was forced
// by the application.
func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) float64 {
	// Record last heap goal for the scavenger.
	// We'll be updating the heap goal soon.
	gcController.lastHeapGoal = gcController.heapGoal

	// Compute the duration of time for which assists were turned on.
	assistDuration := now - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(procs))
	}

	if goexperiment.PacerRedesign {
		if c.heapLive <= c.trigger {
			// Shouldn't happen, but let's be very safe about this in case the
			// GC is somehow extremely short.
			//
			// In this case though, the only reasonable value for c.heapLive-c.trigger
			// would be 0, which isn't really all that useful, i.e. the GC was so short
			// that it didn't matter.
			//
			// Ignore this case and don't update anything.
			return 0
		}
		idleUtilization := 0.0
		if assistDuration > 0 {
			idleUtilization = float64(c.idleMarkTime) / float64(assistDuration*int64(procs))
		}
		// Determine the cons/mark ratio.
		//
		// The units we want for the numerator and denominator are both B / cpu-ns.
		// We get this by taking the bytes allocated or scanned, and divide by the amount of
		// CPU time it took for those operations. For allocations, that CPU time is
		//
		//    assistDuration * procs * (1 - utilization)
		//
		// Where utilization includes just background GC workers and assists. It does *not*
		// include idle GC work time, because in theory the mutator is free to take that at
		// any point.
		//
		// For scanning, that CPU time is
		//
		//    assistDuration * procs * (utilization + idleUtilization)
		//
		// In this case, we *include* idle utilization, because that is additional CPU time
		// the GC had available to it.
		//
		// In effect, idle GC time is sort of double-counted here, but it's very weird compared
		// to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
		// *always* free to take it.
		//
		// So this calculation is really:
		//     ((heapLive-trigger) / (assistDuration * procs * (1-utilization))) /
		//         ((scanWork) / (assistDuration * procs * (utilization+idleUtilization)))
		//
		// Note that because we only care about the ratio, assistDuration and procs cancel out.
		scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
		currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
			(float64(scanWork) * (1 - utilization))

		// Update cons/mark controller. The time period for this is 1 GC cycle.
		//
		// This use of a PI controller might seem strange. So, here's an explanation:
		//
		// currentConsMark represents the consMark we *should've* had to be perfectly
		// on-target for this cycle. Given that we assume the next GC will be like this
		// one in the steady-state, it stands to reason that we should just pick that
		// as our next consMark. In practice, however, currentConsMark is too noisy:
		// we're going to be wildly off-target in each GC cycle if we do that.
		//
		// What we do instead is make a long-term assumption: there is some steady-state
		// consMark value, but it's obscured by noise. By constantly shooting for this
		// noisy-but-perfect consMark value, the controller will bounce around a bit,
		// but its average behavior, in aggregate, should be less noisy and closer to
		// the true long-term consMark value, provided it's tuned to be slightly overdamped.
		var ok bool
		oldConsMark := c.consMark
		c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
		if !ok {
			// The error spiraled out of control. This is incredibly unlikely seeing
			// as this controller is essentially just a smoothing function, but it might
			// mean that something went very wrong with how currentConsMark was calculated.
			// Just reset consMark and keep going.
			c.consMark = 0
		}

		if debug.gcpacertrace > 0 {
			printlock()
			goal := gcGoalUtilization * 100
			print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
			print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
			print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
			if !ok {
				print("[controller reset]")
			}
			println()
			printunlock()
		}
		return 0
	}

	// !goexperiment.PacerRedesign below.

	if userForced {
		// Forced GC means this cycle didn't start at the
		// trigger, so where it finished isn't good
		// information about how to adjust the trigger.
		// Just leave it where it is.
		return c.triggerRatio
	}

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := c.effectiveGrowthRatio()
	actualGrowthRatio := float64(c.heapLive)/float64(c.heapMarked) - 1
	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	triggerRatio := c.triggerRatio + triggerGain*triggerError

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := c.heapMarked
		h_t := c.triggerRatio
		H_T := c.trigger
		h_a := actualGrowthRatio
		H_a := c.heapLive
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.heapScanWork.Load()
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goalΔ=", goalGrowthRatio-h_t,
			" actualΔ=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}

	return triggerRatio
}
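
// A worked cons/mark example (editorial note, illustrative numbers
// only): if the mutator allocated heapLive-trigger = 32 MiB during the
// cycle, the GC scanned scanWork = 64 MiB, utilization = 0.25, and
// idleUtilization = 0.05, then
//
//	currentConsMark = (32 MiB * (0.25 + 0.05)) / (64 MiB * (1 - 0.25)) = 0.2
//
// meaning the application allocates at one fifth of the rate at which
// the GC scans, per unit of CPU time given to each.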

// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
	//
	//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
	//		wakep()
	//		return
	//	}

	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrandn(uint32(gomaxprocs - 1)))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}

// findRunnableGCWorker returns a background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		for {
			v := atomic.Loadint64(ptr)
			if v <= 0 {
				return false
			}

			if atomic.Casint64(ptr, v, v-1) {
				return true
			}
		}
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return nil
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := nanotime() - c.markStartTime
		if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return nil
		}
		// Run a fractional worker.
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}

// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
// the world is started.
//
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
	c.heapMarked = bytesMarked
	c.heapLive = bytesMarked
	c.heapScan = uint64(c.heapScanWork.Load())
	c.lastHeapScan = uint64(c.heapScanWork.Load())

	// heapLive was updated, so emit a trace event.
	if trace.enabled {
		traceHeapAlloc()
	}
}

// logWorkTime updates mark work accounting in the controller by a duration of
// work in nanoseconds.
//
// Safe to execute at any time.
func (c *gcControllerState) logWorkTime(mode gcMarkWorkerMode, duration int64) {
	switch mode {
	case gcMarkWorkerDedicatedMode:
		atomic.Xaddint64(&c.dedicatedMarkTime, duration)
		atomic.Xaddint64(&c.dedicatedMarkWorkersNeeded, 1)
	case gcMarkWorkerFractionalMode:
		atomic.Xaddint64(&c.fractionalMarkTime, duration)
	case gcMarkWorkerIdleMode:
		atomic.Xaddint64(&c.idleMarkTime, duration)
	default:
		throw("logWorkTime: unknown mark worker mode")
	}
}

// update adjusts heapLive and heapScan by the given deltas, emitting a
// trace event and revising the assist ratio as needed. Under
// goexperiment.PacerRedesign, heapScan is left alone while a GC cycle is
// running, since it is fixed at the start of the cycle.
func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		atomic.Xadd64(&gcController.heapLive, dHeapLive)
		if trace.enabled {
			// gcController.heapLive changed.
			traceHeapAlloc()
		}
	}
	// Only update heapScan in the new pacer redesign if we're not
	// currently in a GC.
	if !goexperiment.PacerRedesign || gcBlackenEnabled == 0 {
		if dHeapScan != 0 {
			atomic.Xadd64(&gcController.heapScan, dHeapScan)
		}
	}
	if gcBlackenEnabled != 0 {
		// gcController.heapLive and heapScan changed.
		c.revise()
	}
}

// addScannableStack adjusts scannableStackSize by amount bytes. If pp is
// non-nil, the update is batched per P and only flushed to the global
// counter once the accumulated delta exceeds scannableStackSizeSlack.
func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
	if pp == nil {
		atomic.Xadd64(&c.scannableStackSize, amount)
		return
	}
	pp.scannableStackSizeDelta += amount
	if pp.scannableStackSizeDelta >= scannableStackSizeSlack || pp.scannableStackSizeDelta <= -scannableStackSizeSlack {
		atomic.Xadd64(&c.scannableStackSize, pp.scannableStackSizeDelta)
		pp.scannableStackSizeDelta = 0
	}
}

// addGlobals records that amount additional bytes of global variable
// space are scannable.
func (c *gcControllerState) addGlobals(amount int64) {
	atomic.Xadd64(&c.globalsScan, amount)
}

// commit recomputes all pacing parameters from scratch, namely
// absolute trigger, the heap goal, mark pacing, and sweep pacing.
//
// If goexperiment.PacerRedesign is true, triggerRatio is ignored.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(triggerRatio float64) {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	if !goexperiment.PacerRedesign {
		c.oldCommit(triggerRatio)
		return
	}

	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over where it started the last cycle,
	// plus additional runway for non-heap sources of GC work.
	goal := ^uint64(0)
	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		goal = c.heapMarked + (c.heapMarked+atomic.Load64(&c.stackScan)+atomic.Load64(&c.globalsScan))*uint64(gcPercent)/100
	}

	// Don't trigger below the minimum heap size.
	minTrigger := c.heapMinimum
	if !isSweepDone() {
		// Concurrent sweep happens in the heap growth
		// from gcController.heapLive to trigger, so ensure
		// that concurrent sweep has some heap growth
		// in which to perform sweeping before we
		// start the next GC cycle.
		sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
		if sweepMin > minTrigger {
			minTrigger = sweepMin
		}
	}

	// If we let the trigger go too low, then if the application
	// is allocating very rapidly we might end up in a situation
	// where we're allocating black during a nearly always-on GC.
	// The result of this is a growing heap and ultimately an
	// increase in RSS. By capping us at a point >0, we're essentially
	// saying that we're OK using more CPU during the GC to prevent
	// this growth in RSS.
	//
	// The current constant was chosen empirically: given a sufficiently
	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
	// to <0.05, this constant causes applications to retain the same peak
	// RSS compared to not having this allocator.
	if triggerBound := uint64(0.7*float64(goal-c.heapMarked)) + c.heapMarked; minTrigger < triggerBound {
		minTrigger = triggerBound
	}

	// For small heaps, set the max trigger point at 95% of the heap goal.
	// This ensures we always have *some* headroom when the GC actually starts.
	// For larger heaps, set the max trigger point at the goal, minus the
	// minimum heap size.
	// This choice follows from the fact that the minimum heap size is chosen
	// to reflect the costs of a GC with no work to do. With a large heap but
	// very little scan work to perform, this gives us exactly as much runway
	// as we would need, in the worst case.
	maxRunway := uint64(0.95 * float64(goal-c.heapMarked))
	if largeHeapMaxRunway := goal - c.heapMinimum; goal > c.heapMinimum && maxRunway < largeHeapMaxRunway {
		maxRunway = largeHeapMaxRunway
	}
	maxTrigger := maxRunway + c.heapMarked
	if maxTrigger < minTrigger {
		maxTrigger = minTrigger
	}

	// Compute the trigger by using our estimate of the cons/mark ratio.
	//
	// The idea is to take our expected scan work, and multiply it by
	// the cons/mark ratio to determine how long it'll take to complete
	// that scan work in terms of bytes allocated. This gives us our GC's
	// runway.
	//
	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
	// here we care about the relative rates for some division of CPU
	// resources among the mutator and the GC.
	//
	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
	// by multiplying by our desired division of CPU resources. We choose
	// to express CPU resources as GOMAXPROCS*fraction. Note that because
	// we're working with a ratio here, we can omit the number of CPU cores,
	// because they'll appear in the numerator and denominator and cancel out.
	// As a result, this is basically just "weighing" the cons/mark ratio by
	// our desired division of resources.
	//
	// Furthermore, by setting the trigger so that CPU resources are divided
	// this way, assuming that the cons/mark ratio is correct, we make that
	// division a reality.
	var trigger uint64
	runway := uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.stackScan+c.globalsScan))
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	if trigger < minTrigger {
		trigger = minTrigger
	}
	if trigger > maxTrigger {
		trigger = maxTrigger
	}
	if trigger > goal {
		goal = trigger
	}

	// Commit to the trigger and goal.
	c.trigger = trigger
	atomic.Store64(&c.heapGoal, goal)
	if trace.enabled {
		traceHeapGoal()
	}

	// Update mark pacing.
	if gcphase != _GCoff {
		c.revise()
	}
}
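
// A worked runway example (editorial note, illustrative numbers only):
// with consMark = 0.5 and gcGoalUtilization = 0.25, the weighting factor
// is (1-0.25)/0.25 = 3, so
//
//	runway = 0.5 * 3 * expectedScanWork = 1.5 * expectedScanWork
//
// If the expected scan work (lastHeapScan+stackScan+globalsScan) is
// 20 MiB and the goal is 120 MiB, the trigger lands at 100 MiB... that
// is, goal - runway = 120 MiB - 30 MiB = 90 MiB, subject to the
// minTrigger and maxTrigger bounds computed above.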

// oldCommit sets the trigger ratio and updates everything
// derived from it: the absolute trigger, the heap goal, mark pacing,
// and sweep pacing.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// For !goexperiment.PacerRedesign.
func (c *gcControllerState) oldCommit(triggerRatio float64) {
	gcPercent := c.gcPercent.Load()

	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over the heap marked by the last
	// cycle.
	goal := ^uint64(0)
	if gcPercent >= 0 {
		goal = c.heapMarked + c.heapMarked*uint64(gcPercent)/100
	}

	// Set the trigger ratio, capped to reasonable bounds.
	if gcPercent >= 0 {
		scalingFactor := float64(gcPercent) / 100
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		maxTriggerRatio := 0.95 * scalingFactor
		if triggerRatio > maxTriggerRatio {
			triggerRatio = maxTriggerRatio
		}

		// If we let triggerRatio go too low, then if the application
		// is allocating very rapidly we might end up in a situation
		// where we're allocating black during a nearly always-on GC.
		// The result of this is a growing heap and ultimately an
		// increase in RSS. By capping us at a point >0, we're essentially
		// saying that we're OK using more CPU during the GC to prevent
		// this growth in RSS.
		//
		// The current constant was chosen empirically: given a sufficiently
		// fast/scalable allocator with 48 Ps that could drive the trigger ratio
		// to <0.05, this constant causes applications to retain the same peak
		// RSS compared to not having this allocator.
		minTriggerRatio := 0.6 * scalingFactor
		if triggerRatio < minTriggerRatio {
			triggerRatio = minTriggerRatio
		}
	} else if triggerRatio < 0 {
		// gcPercent < 0, so just make sure we're not getting a negative
		// triggerRatio. This case isn't expected to happen in practice,
		// and doesn't really matter because if gcPercent < 0 then we won't
		// ever consume triggerRatio further on in this function, but let's
		// just be defensive here; the triggerRatio being negative is almost
		// certainly undesirable.
		triggerRatio = 0
	}
	c.triggerRatio = triggerRatio

	// Compute the absolute GC trigger from the trigger ratio.
	//
	// We trigger the next GC cycle when the allocated heap has
	// grown by the trigger ratio over the marked heap size.
	trigger := ^uint64(0)
	if gcPercent >= 0 {
		trigger = uint64(float64(c.heapMarked) * (1 + triggerRatio))
		// Don't trigger below the minimum heap size.
		minTrigger := c.heapMinimum
		if !isSweepDone() {
			// Concurrent sweep happens in the heap growth
			// from gcController.heapLive to trigger, so ensure
			// that concurrent sweep has some heap growth
			// in which to perform sweeping before we
			// start the next GC cycle.
			sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
			if sweepMin > minTrigger {
				minTrigger = sweepMin
			}
		}
		if trigger < minTrigger {
			trigger = minTrigger
		}
		if int64(trigger) < 0 {
			print("runtime: heapGoal=", c.heapGoal, " heapMarked=", c.heapMarked, " gcController.heapLive=", c.heapLive, " initialHeapLive=", work.initialHeapLive, " triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
			throw("trigger underflow")
		}
		if trigger > goal {
			// The trigger ratio is always less than GOGC/100, but
			// other bounds on the trigger may have raised it.
			// Push up the goal, too.
			goal = trigger
		}
	}

	// Commit to the trigger and goal.
	c.trigger = trigger
	atomic.Store64(&c.heapGoal, goal)
	if trace.enabled {
		traceHeapGoal()
	}

	// Update mark pacing.
	if gcphase != _GCoff {
		c.revise()
	}
}

// effectiveGrowthRatio returns the current effective heap growth
// ratio (GOGC/100) based on heapMarked from the previous GC and
// heapGoal for the current GC.
//
// This may differ from gcPercent/100 because of various upper and
// lower bounds on gcPercent. For example, if the heap is smaller than
// heapMinimum, this can be higher than gcPercent/100.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) effectiveGrowthRatio() float64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	egogc := float64(atomic.Load64(&c.heapGoal)-c.heapMarked) / float64(c.heapMarked)
	if egogc < 0 {
		// Shouldn't happen, but just in case.
		egogc = 0
	}
	return egogc
}
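
// For example (editorial note, illustrative numbers only): with
// heapGoal = 150 MiB and heapMarked = 100 MiB, the effective growth
// ratio is (150-100)/100 = 0.5, i.e. an effective GOGC of 50 regardless
// of what gcPercent says.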

// setGCPercent updates gcPercent and all related pacer state.
// Returns the old value of gcPercent.
//
// Calls gcControllerState.commit.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.gcPercent.Load()
	if in < 0 {
		in = -1
	}
	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
	c.gcPercent.Store(in)
	// Update pacing in response to gcPercent change.
	c.commit(c.triggerRatio)

	return out
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		gcPaceSweeper(gcController.trigger)
		gcPaceScavenger(gcController.heapGoal, gcController.lastHeapGoal)
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(atomic.Load(&work.cycles))
	}

	return out
}

func readGOGC() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}
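
// For example, readGOGC maps GOGC=off to -1 (GC disabled), GOGC=50 to
// 50, and an unset or unparseable GOGC to the default of 100.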

type piController struct {
	kp float64 // Proportional constant.
	ti float64 // Integral time constant.
	tt float64 // Reset time.

	min, max float64 // Output boundaries.

	// PI controller state.

	errIntegral float64 // Integral of the error from t=0 to now.

	// Error flags.
	errOverflow   bool // Set if errIntegral ever overflowed.
	inputOverflow bool // Set if an operation with the input overflowed.
}

// next provides a new sample to the controller.
//
// input is the sample, setpoint is the desired point, and period is how much
// time (in whatever unit makes the most sense) has passed since the last sample.
//
// Returns a new value for the variable it's controlling, and whether the operation
// completed successfully. One reason this might fail is if error has been growing
// in an unbounded manner, to the point of overflow.
//
// In the specific case where an error overflow occurs, the errOverflow field will be
// set and the rest of the controller's internal state will be fully reset.
func (c *piController) next(input, setpoint, period float64) (float64, bool) {
	// Compute the raw output value.
	prop := c.kp * (setpoint - input)
	rawOutput := prop + c.errIntegral

	// Clamp rawOutput into output.
	output := rawOutput
	if isInf(output) || isNaN(output) {
		// The input had a large enough magnitude that either it was already
		// overflowed, or some operation with it overflowed.
		// Set a flag and reset. That's the safest thing to do.
		c.reset()
		c.inputOverflow = true
		return c.min, false
	}
	if output < c.min {
		output = c.min
	} else if output > c.max {
		output = c.max
	}

	// Update the controller's state.
	if c.ti != 0 && c.tt != 0 {
		c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
		if isInf(c.errIntegral) || isNaN(c.errIntegral) {
			// So much error has accumulated that we managed to overflow.
			// The assumptions around the controller have likely broken down.
			// Set a flag and reset. That's the safest thing to do.
			c.reset()
			c.errOverflow = true
			return c.min, false
		}
	}
	return output, true
}

// reset resets the controller state, except for controller error flags.
func (c *piController) reset() {
	c.errIntegral = 0
}