Source file src/runtime/mgcpacer.go

// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"unsafe"
)

const (
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	gcGoalUtilization = 0.30

	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal.
	//
	// Setting this to < gcGoalUtilization avoids saturating the trigger
	// feedback controller when there are no assists, which allows it to
	// better control CPU and heap growth. However, the larger the gap,
	// the more mutator assists are expected to happen, which impact
	// mutator latency.
	gcBackgroundUtilization = 0.25
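	// For example (illustrative figures only): with GOMAXPROCS=8,
	// background marking targets 0.25*8 = 2 Ps' worth of CPU, and
	// mark assists are expected to make up the remaining
	// (0.30-0.25)*8 = 0.4 Ps' worth when allocation demands it.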

	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.scanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000

	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000

	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10

	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
	defaultHeapMinimum = 4 << 20
)

func init() {
	if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
		println(offset)
		throw("gcController.heapLive not aligned to 8 bytes")
	}
}

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust gcController.trigger
// based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
//
// All fields of gcController are used only during a single mark
// cycle.
var gcController gcControllerState

type gcControllerState struct {
	// Initialized from $GOGC. GOGC=off means no GC.
	gcPercent int32

	_ uint32 // padding so following 64-bit values are 8-byte aligned

	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// debugging.
	heapMinimum uint64

	// triggerRatio is the heap growth ratio that triggers marking.
	//
	// E.g., if this is 0.6, then GC should start when the live
	// heap has reached 1.6 times the heap size marked by the
	// previous cycle. This should be ≤ GOGC/100 so the trigger
	// heap size is less than the goal heap size. This is set
	// during mark termination for the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	triggerRatio float64

	// trigger is the heap size that triggers marking.
	//
	// When heapLive ≥ trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	//
	// This is computed from triggerRatio during mark termination
	// for the next cycle's trigger.
	//
	// Protected by mheap_.lock or a STW.
	trigger uint64

	// heapGoal is the goal heapLive for when next GC ends.
	// Set to ^uint64(0) if disabled.
	//
	// Read and written atomically, unless the world is stopped.
	heapGoal uint64

	// lastHeapGoal is the value of heapGoal for the previous GC.
	// Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64

	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.heapAlloc, since heapAlloc includes
	// unmarked objects that have not yet been swept (and hence goes up as we
	// allocate and down as we sweep) while heapLive excludes these
	// objects (and hence only goes up between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Reads should likewise be atomic (or during STW).
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive uint64

	// heapScan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heapLive), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// Whenever this is updated, call this gcControllerState's
	// revise() method.
	//
	// Read and written atomically or with the world stopped.
	heapScan uint64

	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64

	// scanWork is the total scan work performed this cycle. This
	// is updated atomically during the cycle. Updates occur in
	// bounded batches, since it is both written and read
	// throughout the cycle. At the end of the cycle, this is how
	// much of the retained heap is scannable.
	//
	// Currently this is the bytes of heap scanned. For most uses,
	// this is an opaque unit of work, but for estimation the
	// definition is important.
	scanWork int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	//
	// Stored as a uint64, but it's actually a float64. Use
	// float64frombits to get the value.
	//
	// Read and written atomically.
	assistWorkPerByte uint64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Stored as a uint64, but it's actually a float64. Use
	// float64frombits to get the value.
	//
	// Read and written atomically.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte, users may notice a skew between
	// the two values; such a state is safe.
	assistBytesPerWork uint64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64

	_ cpu.CacheLinePad
}

func (c *gcControllerState) init(gcPercent int32) {
	c.heapMinimum = defaultHeapMinimum

	// Set a reasonable initial GC trigger.
	c.triggerRatio = 7 / 8.0

	// Fake a heapMarked value so it looks like a trigger at
	// heapMinimum is the appropriate growth from heapMarked.
	// This will go into computing the initial GC goal.
	c.heapMarked = uint64(float64(c.heapMinimum) / (1 + c.triggerRatio))
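
	// Worked example (illustrative): with the defaults above,
	// heapMarked = 4MB/1.875 ≈ 2.2MB, so the initial trigger
	// heapMarked*(1+triggerRatio) works out to exactly heapMinimum.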

	// This will also compute and set the GC trigger and goal.
	c.setGCPercent(gcPercent)
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
func (c *gcControllerState) startCycle() {
	c.scanWork = 0
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0

	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed gcController.heapLive
	// over trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by a tiny bit.
	if c.heapGoal < c.heapLive+1024*1024 {
		c.heapGoal = c.heapLive + 1024*1024
	}

	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(gomaxprocs) * gcBackgroundUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
	utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			c.dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(gomaxprocs)
	} else {
		c.fractionalUtilizationGoal = 0
	}
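
	// For example (illustrative): with GOMAXPROCS=6 the goal is
	// 6*0.25 = 1.5 Ps' worth of CPU. Rounding gives 2 dedicated
	// workers, an error of 2/1.5 - 1 ≈ +0.33 > 0.3, so we drop back
	// to 1 dedicated worker and run fractional workers for
	// (1.5-1)/6 ≈ 8.3% of each P's time.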

	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		c.dedicatedMarkWorkersNeeded = int64(gomaxprocs)
		c.fractionalUtilizationGoal = 0
	}

	// Clear per-P state
	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		assistRatio := float64frombits(atomic.Load64(&c.assistWorkPerByte))
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			c.heapGoal>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or gcController.heapGoal is updated. It is safe to
// call concurrently, but it may race with other calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}
	live := atomic.Load64(&c.heapLive)
	scan := atomic.Load64(&c.heapScan)
	work := atomic.Loadint64(&c.scanWork)

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(atomic.Load64(&c.heapGoal))

	// Compute the expected scan work remaining.
	//
	// This is estimated based on the expected
	// steady-state scannable heap. For example, with
	// GOGC=100, only half of the scannable heap is
	// expected to be live, so that's what we target.
	//
	// (This is a float calculation to avoid overflowing on
	// 100*heapScan.)
	scanWorkExpected := int64(float64(scan) * 100 / float64(100+gcPercent))

	if int64(live) > heapGoal || work > scanWorkExpected {
		// We're past the soft goal, or we've already done more scan
		// work than we expected. Pace GC so that in the worst case it
		// will complete by the hard goal.
		const maxOvershoot = 1.1
		heapGoal = int64(float64(heapGoal) * maxOvershoot)

		// Compute the upper bound on the scan work remaining.
		scanWorkExpected = int64(scan)
	}

	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}

	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}

	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
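	// For example (illustrative): with 1MB of scan work remaining and
	// 4MB of heap left before the goal, assistWorkPerByte = 0.25, so a
	// mutator that allocates 400KB must perform (or steal) 100KB of
	// scan work before it may proceed.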
	atomic.Store64(&c.assistWorkPerByte, float64bits(assistWorkPerByte))
	atomic.Store64(&c.assistBytesPerWork, float64bits(assistBytesPerWork))
}

// endCycle computes the trigger ratio for the next cycle.
// userForced indicates whether the current GC cycle was forced
// by the application.
func (c *gcControllerState) endCycle(userForced bool) float64 {
	if userForced {
		// Forced GC means this cycle didn't start at the
		// trigger, so where it finished isn't good
		// information about how to adjust the trigger.
		// Just leave it where it is.
		return c.triggerRatio
	}

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := c.effectiveGrowthRatio()
	actualGrowthRatio := float64(c.heapLive)/float64(c.heapMarked) - 1
	assistDuration := nanotime() - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
	}

	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	triggerRatio := c.triggerRatio + triggerGain*triggerError
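
	// Worked example (illustrative): with goalGrowthRatio = 1.0, a
	// trigger ratio of 0.875, an actual growth ratio of 1.05, and
	// utilization exactly at the 0.30 goal:
	//
	//	e = 1.0 - 0.875 - (0.30/0.30)*(1.05-0.875) = -0.05
	//
	// so the next trigger ratio is 0.875 + 0.5*(-0.05) = 0.85, pulling
	// the trigger earlier to compensate for the overshoot.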

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := c.heapMarked
		h_t := c.triggerRatio
		H_T := c.trigger
		h_a := actualGrowthRatio
		H_a := c.heapLive
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.scanWork
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goalΔ=", goalGrowthRatio-h_t,
			" actualΔ=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}

	return triggerRatio
}

// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
	//
	//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
	//		wakep()
	//		return
	//	}

	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
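		// Draw uniformly over the other gomaxprocs-1 Ps: pick an id in
		// [0, gomaxprocs-1) and shift ids at or above our own up by
		// one so we never pick ourselves.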
		id := int32(fastrandn(uint32(gomaxprocs - 1)))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}

// findRunnableGCWorker returns a background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
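		// Atomically decrement *ptr if it is positive. The
		// load/compare-and-swap loop ensures concurrent callers never
		// take the count below zero.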
		for {
			v := atomic.Loadint64(ptr)
			if v <= 0 {
				return false
			}

			if atomic.Casint64(ptr, v, v-1) {
				return true
			}
		}
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return nil
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := nanotime() - c.markStartTime
		if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return nil
		}
		// Run a fractional worker.
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}

// commit sets the trigger ratio and updates everything
// derived from it: the absolute trigger, the heap goal, mark pacing,
// and sweep pacing.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(triggerRatio float64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over the heap marked by the last
	// cycle.
	goal := ^uint64(0)
	if c.gcPercent >= 0 {
		goal = c.heapMarked + c.heapMarked*uint64(c.gcPercent)/100
	}
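
	// For example (illustrative): with gcPercent = 100 and 100MB
	// marked live by the previous cycle, the goal is 200MB.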

	// Set the trigger ratio, capped to reasonable bounds.
	if c.gcPercent >= 0 {
		scalingFactor := float64(c.gcPercent) / 100
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		maxTriggerRatio := 0.95 * scalingFactor
		if triggerRatio > maxTriggerRatio {
			triggerRatio = maxTriggerRatio
		}

		// If we let triggerRatio go too low, then if the application
		// is allocating very rapidly we might end up in a situation
		// where we're allocating black during a nearly always-on GC.
		// The result of this is a growing heap and ultimately an
		// increase in RSS. By capping us at a point >0, we're essentially
		// saying that we're OK using more CPU during the GC to prevent
		// this growth in RSS.
		//
		// The current constant was chosen empirically: given a sufficiently
		// fast/scalable allocator with 48 Ps that could drive the trigger ratio
		// to <0.05, this constant causes applications to retain the same peak
		// RSS compared to not having this allocator.
		minTriggerRatio := 0.6 * scalingFactor
		if triggerRatio < minTriggerRatio {
			triggerRatio = minTriggerRatio
		}
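
		// Net effect (illustrative): with gcPercent = 100, the two
		// caps above keep the trigger ratio within [0.6, 0.95].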
	} else if triggerRatio < 0 {
		// gcPercent < 0, so just make sure we're not getting a negative
		// triggerRatio. This case isn't expected to happen in practice,
		// and doesn't really matter because if gcPercent < 0 then we won't
		// ever consume triggerRatio further on in this function, but let's
		// just be defensive here; the triggerRatio being negative is almost
		// certainly undesirable.
		triggerRatio = 0
	}
	c.triggerRatio = triggerRatio

	// Compute the absolute GC trigger from the trigger ratio.
	//
	// We trigger the next GC cycle when the allocated heap has
	// grown by the trigger ratio over the marked heap size.
	trigger := ^uint64(0)
	if c.gcPercent >= 0 {
		trigger = uint64(float64(c.heapMarked) * (1 + triggerRatio))
		// Don't trigger below the minimum heap size.
		minTrigger := c.heapMinimum
		if !isSweepDone() {
			// Concurrent sweep happens in the heap growth
			// from gcController.heapLive to trigger, so ensure
			// that concurrent sweep has some heap growth
			// in which to perform sweeping before we
			// start the next GC cycle.
			sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
			if sweepMin > minTrigger {
				minTrigger = sweepMin
			}
		}
		if trigger < minTrigger {
			trigger = minTrigger
		}
		if int64(trigger) < 0 {
			print("runtime: heapGoal=", c.heapGoal, " heapMarked=", c.heapMarked, " gcController.heapLive=", c.heapLive, " initialHeapLive=", work.initialHeapLive, " triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
			throw("trigger underflow")
		}
		if trigger > goal {
			// The trigger ratio is always less than GOGC/100, but
			// other bounds on the trigger may have raised it.
			// Push up the goal, too.
			goal = trigger
		}
	}

	// Commit to the trigger and goal.
	c.trigger = trigger
	atomic.Store64(&c.heapGoal, goal)
	if trace.enabled {
		traceHeapGoal()
	}

	// Update mark pacing.
	if gcphase != _GCoff {
		c.revise()
	}

	// Update sweep pacing.
	if isSweepDone() {
		mheap_.sweepPagesPerByte = 0
	} else {
		// Concurrent sweep needs to sweep all of the in-use
		// pages by the time the allocated heap reaches the GC
		// trigger. Compute the ratio of in-use pages to sweep
		// per byte allocated, accounting for the fact that
		// some might already be swept.
		heapLiveBasis := atomic.Load64(&c.heapLive)
		heapDistance := int64(trigger) - int64(heapLiveBasis)
		// Add a little margin so rounding errors and
		// concurrent sweep are less likely to leave pages
		// unswept when GC starts.
		heapDistance -= 1024 * 1024
		if heapDistance < _PageSize {
			// Avoid setting the sweep ratio extremely high
			heapDistance = _PageSize
		}
		pagesSwept := atomic.Load64(&mheap_.pagesSwept)
		pagesInUse := atomic.Load64(&mheap_.pagesInUse)
		sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
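		// For example (illustrative): with 1000 unswept in-use pages
		// and roughly 63MB of heap growth left before the trigger,
		// each allocated byte must sweep about 1000/(63<<20) pages.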
		if sweepDistancePages <= 0 {
			mheap_.sweepPagesPerByte = 0
		} else {
			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
			mheap_.sweepHeapLiveBasis = heapLiveBasis
			// Write pagesSweptBasis last, since this
			// signals concurrent sweeps to recompute
			// their debt.
			atomic.Store64(&mheap_.pagesSweptBasis, pagesSwept)
		}
	}

	gcPaceScavenger()
}

// effectiveGrowthRatio returns the current effective heap growth
// ratio (GOGC/100) based on heapMarked from the previous GC and
// heapGoal for the current GC.
//
// This may differ from gcPercent/100 because of various upper and
// lower bounds on gcPercent. For example, if the heap is smaller than
// heapMinimum, this can be higher than gcPercent/100.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) effectiveGrowthRatio() float64 {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	egogc := float64(atomic.Load64(&c.heapGoal)-c.heapMarked) / float64(c.heapMarked)
	if egogc < 0 {
		// Shouldn't happen, but just in case.
		egogc = 0
	}
	return egogc
}

// setGCPercent updates gcPercent and all related pacer state.
// Returns the old value of gcPercent.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	out := c.gcPercent
	if in < 0 {
		in = -1
	}
	c.gcPercent = in
	c.heapMinimum = defaultHeapMinimum * uint64(c.gcPercent) / 100
	// Update pacing in response to gcPercent change.
	c.commit(c.triggerRatio)

	return out
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(atomic.Load(&work.cycles))
	}
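
	// For example (illustrative): runtime/debug.SetGCPercent(-1) lands
	// here with in = -1, disabling collection; the wait above ensures
	// we do not return while a mark phase is still running.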

	return out
}

func readGOGC() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}