Source file src/runtime/mgcscavenge.go
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Scavenging free pages.
//
// This file implements scavenging (the release of physical pages backing mapped
// memory) of free and unused pages in the heap as a way to deal with page-level
// fragmentation and reduce the RSS of Go applications.
//
// Scavenging in Go happens on two fronts: there's the background
// (asynchronous) scavenger and the heap-growth (synchronous) scavenger.
//
// The former happens on a goroutine much like the background sweeper which is
// soft-capped at using scavengePercent of the mutator's time, based on
// order-of-magnitude estimates of the costs of scavenging. The background
// scavenger's primary goal is to bring the estimated heap RSS of the
// application down to a goal.
//
// That goal is defined as:
//	(retainExtraPercent+100) / 100 * (heapGoal / lastHeapGoal) * last_heap_inuse
//
// Essentially, we wish to have the application's RSS track the heap goal, but
// the heap goal is defined in terms of bytes of objects, rather than pages like
// RSS. As a result, we need to account for fragmentation internal to spans.
// heapGoal / lastHeapGoal defines the ratio between the current heap goal
// and the last heap goal, which tells us by how much the heap is growing and
// shrinking. We estimate what the heap will grow to in terms of pages by taking
// this ratio and multiplying it by heap_inuse at the end of the last GC, which
// allows us to account for this additional fragmentation. Note that this
// procedure makes the assumption that the degree of fragmentation won't change
// dramatically over the next GC cycle. Overestimating the amount of
// fragmentation simply results in higher memory use, which will be accounted
// for by the next pacing update. Underestimating the fragmentation, however,
// could lead to performance degradation. Handling this case is not within the
// scope of the scavenger. Situations where the amount of fragmentation balloons
// over the course of a single GC cycle should be considered pathologies,
// flagged as bugs, and fixed appropriately.
//
// An additional factor of retainExtraPercent is added as a buffer to help ensure
// that there's more unscavenged memory to allocate out of, since each allocation
// out of scavenged memory incurs a potentially expensive page fault.
//
// The goal is updated after each GC and the scavenger's pacing parameters
// (which live in mheap_) are updated to match. The pacing parameters work much
// like the background sweeping parameters. The parameters define a line whose
// horizontal axis is time and vertical axis is estimated heap RSS, and the
// scavenger attempts to stay below that line at all times.
//
// The synchronous heap-growth scavenging happens whenever the heap grows in
// size, for some definition of heap-growth. The intuition behind this is that
// the application had to grow the heap because existing fragments were
// not sufficiently large to satisfy a page-level memory allocation, so we
// scavenge those fragments eagerly to offset the growth in RSS that results.
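//
// As a concrete example of the background scavenger's goal (with purely
// illustrative numbers): if the last GC cycle ended with heap_inuse at
// 1.2 GiB, the previous heap goal was 1 GiB, and the new heap goal is
// 1.5 GiB, then with retainExtraPercent = 10 the scavenger aims to keep
// the estimated heap RSS at or below about
// 1.10 * (1.5 / 1.0) * 1.2 GiB ≈ 1.98 GiB.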

package runtime

import (
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// The background scavenger is paced according to these parameters.
	//
	// scavengePercent represents the portion of mutator time we're willing
	// to spend on scavenging in percent.
	scavengePercent = 1 // 1%

	// retainExtraPercent represents the amount of memory over the heap goal
	// that the scavenger should keep as a buffer space for the allocator.
	//
	// The purpose of maintaining this overhead is to have a greater pool of
	// unscavenged memory available for allocation (since using scavenged memory
	// incurs an additional cost), to account for heap fragmentation and
	// the ever-changing layout of the heap.
	retainExtraPercent = 10

	// maxPagesPerPhysPage is the maximum number of supported runtime pages per
	// physical page, based on maxPhysPageSize.
	maxPagesPerPhysPage = maxPhysPageSize / pageSize

	// scavengeCostRatio is the approximate ratio between the costs of using previously
	// scavenged memory and scavenging memory.
	//
	// For most systems the cost of scavenging greatly outweighs the costs
	// associated with using scavenged memory, making this constant 0. On other systems
	// (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
	//
	// This ratio is used as part of a multiplicative factor to help the scavenger account
	// for the additional costs of using scavenged memory in its pacing.
	scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)

	// scavengeReservationShards determines the amount of memory the scavenger
	// should reserve for scavenging at a time. Specifically, the amount of
	// memory reserved is (heap size in bytes) / scavengeReservationShards.
	scavengeReservationShards = 64
)

// heapRetained returns an estimate of the current heap RSS.
func heapRetained() uint64 {
	return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released)
}

// gcPaceScavenger updates the scavenger's pacing, particularly
// its rate and RSS goal. For this, it requires the current heapGoal,
// and the heapGoal for the previous GC cycle.
//
// The RSS goal is based on the current heap goal with a small overhead
// to accommodate non-determinism in the allocator.
//
// The pacing is based on scavengePageRate, which applies to both regular and
// huge pages. See that constant for more information.
//
// Must be called whenever GC pacing is updated.
//
// mheap_.lock must be held or the world must be stopped.
func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// If we're called before the first GC has completed, disable scavenging.
	// We never scavenge before the 2nd GC cycle anyway (we don't have enough
	// information about the heap yet) so this is fine, and avoids a fault
	// or garbage data later.
	if lastHeapGoal == 0 {
		atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
		return
	}
	// Compute our scavenging goal.
	goalRatio := float64(heapGoal) / float64(lastHeapGoal)
	retainedGoal := uint64(float64(memstats.last_heap_inuse) * goalRatio)
	// Add retainExtraPercent overhead to retainedGoal. This calculation
	// looks strange but the purpose is to arrive at an integer division
	// (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
	// that also avoids the overflow from a multiplication.
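	// With the current retainExtraPercent of 10, the divisor below works out
	// to 10, so this adds retainedGoal/10 (a 10% buffer) to the goal.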
	retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
	// Align it to a physical page boundary to make the following calculations
	// a bit more exact.
	retainedGoal = (retainedGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)

	// Represents where we are now in the heap's contribution to RSS in bytes.
	//
	// Guaranteed to always be a multiple of physPageSize on systems where
	// physPageSize <= pageSize since we map heap_sys at a rate larger than
	// any physPageSize and release memory in multiples of the physPageSize.
	//
	// However, certain functions recategorize heap_sys as other stats (e.g.
	// stack_sys) and this happens in multiples of pageSize, so on systems
	// where physPageSize > pageSize the calculations below will not be exact.
	// Generally this is OK since we'll be off by at most one regular
	// physical page.
	retainedNow := heapRetained()

	// If we're already below our goal, or within one page of our goal, then disable
	// the background scavenger. We disable the background scavenger if there's
	// less than one physical page of work to do because it's not worth it.
	if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
		atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
		return
	}
	atomic.Store64(&mheap_.scavengeGoal, retainedGoal)
}

// Sleep/wait state of the background scavenger.
var scavenge struct {
	lock                 mutex
	g                    *g
	parked               bool
	timer                *timer
	sysmonWake           uint32 // Set atomically.
	printControllerReset bool   // Whether the next scav trace should note a controller reset.
}

// readyForScavenger signals sysmon to wake the scavenger because
// there may be new work to do.
//
// There may be a significant delay between when this function runs
// and when the scavenger is kicked awake, but it may be safely invoked
// in contexts where wakeScavenger is unsafe to call directly.
func readyForScavenger() {
	atomic.Store(&scavenge.sysmonWake, 1)
}

// wakeScavenger immediately unparks the scavenger if necessary.
//
// May run without a P, but it may allocate, so it must not be called
// on any allocation path.
//
// mheap_.lock, scavenge.lock, and sched.lock must not be held.
func wakeScavenger() {
	lock(&scavenge.lock)
	if scavenge.parked {
		// Notify sysmon that it shouldn't bother waking up the scavenger.
		atomic.Store(&scavenge.sysmonWake, 0)

		// Try to stop the timer but we don't really care if we succeed.
		// It's possible that either a timer was never started, or that
		// we're racing with it.
		// In the case that we're racing with it, there's a low chance that
		// we experience a spurious wake-up of the scavenger, but that's
		// totally safe.
		stopTimer(scavenge.timer)

		// Unpark the goroutine and tell it that there may have been a pacing
		// change. Note that we skip the scheduler's runnext slot because we
		// want to avoid having the scavenger interfere with the fair
		// scheduling of user goroutines. In effect, this schedules the
		// scavenger at a "lower priority" but that's OK because it'll
		// catch up on the work it missed when it does get scheduled.
		scavenge.parked = false

		// Ready the goroutine by injecting it. We use injectglist instead
		// of ready or goready in order to allow us to run this function
		// without a P. injectglist also avoids placing the goroutine in
		// the current P's runnext slot, which is desirable to prevent
		// the scavenger from interfering with user goroutine scheduling
		// too much.
		var list gList
		list.push(scavenge.g)
		injectglist(&list)
	}
	unlock(&scavenge.lock)
}

// scavengeSleep attempts to put the scavenger to sleep for ns.
//
// Note that this function should only be called by the scavenger.
//
// The scavenger may be woken up earlier by a pacing change, and it may not go
// to sleep at all if there's a pending pacing change.
//
// Returns the amount of time actually slept.
func scavengeSleep(ns int64) int64 {
	lock(&scavenge.lock)

	// Set the timer.
	//
	// This must happen here instead of inside gopark
	// because we can't close over any variables without
	// failing escape analysis.
	start := nanotime()
	resetTimer(scavenge.timer, start+ns)

	// Mark ourself as asleep and go to sleep.
	scavenge.parked = true
	goparkunlock(&scavenge.lock, waitReasonSleep, traceEvGoSleep, 2)

	// Return how long we actually slept for.
	return nanotime() - start
}

// Background scavenger.
//
// The background scavenger maintains the RSS of the application below
// the line described by the proportional scavenging statistics in
// the mheap struct.
func bgscavenge(c chan int) {
	scavenge.g = getg()

	lockInit(&scavenge.lock, lockRankScavenge)
	lock(&scavenge.lock)
	scavenge.parked = true

	scavenge.timer = new(timer)
	scavenge.timer.f = func(_ any, _ uintptr) {
		wakeScavenger()
	}

	c <- 1
	goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)

	// idealFraction is the ideal % of overall application CPU time that we
	// spend scavenging.
	idealFraction := float64(scavengePercent) / 100.0

	// Input: fraction of CPU time used.
	// Setpoint: idealFraction.
	// Output: ratio of critical time to sleep time (determines sleep time).
	//
	// The output of this controller is somewhat indirect to what we actually
	// want to achieve: how much time to sleep for. The reason for this definition
	// is to ensure that the controller's outputs have a direct relationship with
	// its inputs (as opposed to an inverse relationship), making it somewhat
	// easier to reason about for tuning purposes.
	critSleepController := piController{
		// Tuned loosely via Ziegler-Nichols process.
		kp: 0.3375,
		ti: 3.2e6,
		tt: 1e9, // 1 second reset time.

		// These ranges seem wide, but we want to give the controller plenty of
		// room to hunt for the optimal value.
		min: 0.001,  // 1:1000
		max: 1000.0, // 1000:1
	}
	// It doesn't really matter what value we start at, but it can't be zero, because
	// that'll cause divide-by-zero issues. Pick something conservative which we'll
	// also use as a fallback.
	const startingCritSleepRatio = 0.001
	critSleepRatio := startingCritSleepRatio
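	// For example, at this conservative starting ratio of 0.001, every
	// millisecond of scavenging work is followed by roughly one second of
	// sleep (the sleep time below is computed as crit / critSleepRatio).
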
	// Duration left in nanoseconds during which we avoid using the controller and
	// we hold critSleepRatio at a conservative value. Used if the controller's
	// assumptions fail to hold.
	controllerCooldown := int64(0)
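
	// Main loop: accumulate at least minCritTime of scavenging work (or park
	// if there is none), sleep long enough to keep the scavenger's CPU usage
	// near idealFraction, and let the controller adjust critSleepRatio.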
	for {
		released := uintptr(0)
		crit := float64(0)

		// Spend at least 1 ms scavenging, otherwise the corresponding
		// sleep time to maintain our desired utilization is too low to
		// be reliable.
		const minCritTime = 1e6
		for crit < minCritTime {
			// If background scavenging is disabled or if there's no work to do, just park.
			retained, goal := heapRetained(), atomic.Load64(&mheap_.scavengeGoal)
			if retained <= goal {
				break
			}

			// scavengeQuantum is the amount of memory we try to scavenge
			// in one go. A smaller value means the scavenger is more responsive
			// to the scheduler in case of e.g. preemption. A larger value means
			// that the overheads of scavenging are better amortized, so better
			// scavenging throughput.
			//
			// The current value is chosen assuming a cost of ~10µs/physical page
			// (this is somewhat pessimistic), which implies a worst-case latency of
			// about 160µs for 4 KiB physical pages. The current value is biased
			// toward latency over throughput.
			const scavengeQuantum = 64 << 10

			// Accumulate the amount of time spent scavenging.
			start := nanotime()
			r := mheap_.pages.scavenge(scavengeQuantum)
			atomic.Xadduintptr(&mheap_.pages.scav.released, r)
			end := nanotime()

			// On some platforms we may see end <= start if the time it takes to scavenge
			// memory is less than the minimum granularity of its clock (e.g. Windows) or
			// due to clock bugs.
			//
			// In this case, just assume scavenging takes 10 µs per regular physical page
			// (determined empirically), and conservatively ignore the impact of huge pages
			// on timing.
			const approxCritNSPerPhysicalPage = 10e3
			if end <= start {
				crit += approxCritNSPerPhysicalPage * float64(r/physPageSize)
			} else {
				crit += float64(end - start)
			}
			released += r

			// When using fake time just do one loop.
			if faketime != 0 {
				break
			}
		}

		if released == 0 {
			lock(&scavenge.lock)
			scavenge.parked = true
			goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
			continue
		}

		if released < physPageSize {
			// If this happens, it means that we may have attempted to release part
			// of a physical page, but the likely effect of that is that it released
			// the whole physical page, some of which may have still been in-use.
			// This could lead to memory corruption. Throw.
			throw("released less than one physical page of memory")
		}

		if crit < minCritTime {
			// This means there wasn't enough work to actually fill up minCritTime.
			// That's fine; we shouldn't try to do anything with this information
			// because it's going to result in a short enough sleep request that things
			// will get messy. Just assume we did at least this much work.
			// All this means is that we'll sleep longer than we otherwise would have.
			crit = minCritTime
		}

		// Multiply the critical time by 1 + the ratio of the costs of using
		// scavenged memory vs. scavenging memory. This forces us to pay down
		// the cost of reusing this memory eagerly by sleeping for a longer period
		// of time and scavenging less frequently. More concretely, we avoid situations
		// where we end up scavenging so often that we hurt allocation performance
		// because of the additional overheads of using scavenged memory.
		crit *= 1 + scavengeCostRatio
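		// (scavengeCostRatio is 0 on most platforms, so this is usually a
		// no-op; on Darwin and iOS it is 0.7, inflating crit by 70% there.)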

		// Go to sleep based on how much time we spent doing work.
		slept := scavengeSleep(int64(crit / critSleepRatio))

		// Stop here if we're cooling down from the controller.
		if controllerCooldown > 0 {
			// crit and slept aren't exact measures of time, but it's OK to be a bit
			// sloppy here. We're just hoping we're avoiding some transient bad behavior.
			t := slept + int64(crit)
			if t > controllerCooldown {
				controllerCooldown = 0
			} else {
				controllerCooldown -= t
			}
			continue
		}

		// Calculate the CPU time spent.
		//
		// This may be slightly inaccurate with respect to GOMAXPROCS, but we're
		// recomputing this often enough relative to GOMAXPROCS changes in general
		// (it only changes when the world is stopped, and not during a GC) that
		// that small inaccuracy is in the noise.
		cpuFraction := float64(crit) / ((float64(slept) + crit) * float64(gomaxprocs))

		// Update the critSleepRatio, adjusting until we reach our ideal fraction.
		var ok bool
		critSleepRatio, ok = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
		if !ok {
			// The core assumption of the controller, that we can get a proportional
			// response, broke down. This may be transient, so temporarily switch to
			// sleeping a fixed, conservative amount.
			critSleepRatio = startingCritSleepRatio
			controllerCooldown = 5e9 // 5 seconds.

			// Signal the scav trace printer to output this.
			lock(&scavenge.lock)
			scavenge.printControllerReset = true
			unlock(&scavenge.lock)
		}
	}
}

// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first. Successive calls continue from where the previous
// call left off until the heap is exhausted. Call scavengeStartGen to bring
// it back to the top of the heap.
//
// Returns the amount of memory scavenged in bytes.
func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
	var (
		addrs addrRange
		gen   uint32
	)
	released := uintptr(0)
	for released < nbytes {
		if addrs.size() == 0 {
			if addrs, gen = p.scavengeReserve(); addrs.size() == 0 {
				break
			}
		}
		systemstack(func() {
			r, a := p.scavengeOne(addrs, nbytes-released)
			released += r
			addrs = a
		})
	}
	// Only unreserve the space which hasn't been scavenged or searched
	// to ensure we always make progress.
	p.scavengeUnreserve(addrs, gen)
	return released
}

// printScavTrace prints a scavenge trace line to standard error.
//
// released should be the amount of memory released since the last time this
// was called, and forced indicates whether the scavenge was forced by the
// application.
//
// scavenge.lock must be held.
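//
// An example line (with illustrative values) looks like:
//
//	scav 4 96 KiB work, 4096 KiB total, 87% util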
func printScavTrace(gen uint32, released uintptr, forced bool) {
	assertLockHeld(&scavenge.lock)

	printlock()
	print("scav ", gen, " ",
		released>>10, " KiB work, ",
		atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
		(atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
	)
	if forced {
		print(" (forced)")
	} else if scavenge.printControllerReset {
		print(" [controller reset]")
		scavenge.printControllerReset = false
	}
	println()
	printunlock()
}

// scavengeStartGen starts a new scavenge generation, resetting
// the scavenger's search space to the full in-use address space.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) scavengeStartGen() {
	assertLockHeld(p.mheapLock)

	lock(&p.scav.lock)
	if debug.scavtrace > 0 {
		printScavTrace(p.scav.gen, atomic.Loaduintptr(&p.scav.released), false)
	}
	p.inUse.cloneInto(&p.scav.inUse)

	// Pick the new starting address for the scavenger cycle.
	var startAddr offAddr
	if p.scav.scavLWM.lessThan(p.scav.freeHWM) {
		// The "free" high watermark exceeds the "scavenged" low watermark,
		// so there are free scavengable pages in parts of the address space
		// that the scavenger already searched, the high watermark being the
		// highest one. Pick that as our new starting point to ensure we
		// see those pages.
		startAddr = p.scav.freeHWM
	} else {
		// The "free" high watermark does not exceed the "scavenged" low
		// watermark. This means the allocator didn't free any memory in
		// the range we scavenged last cycle, so we might as well continue
		// scavenging from where we were.
		startAddr = p.scav.scavLWM
	}
	p.scav.inUse.removeGreaterEqual(startAddr.addr())

	// reservationBytes may be zero if p.inUse.totalBytes is small, or if
	// scavengeReservationShards is large. This case is fine as the scavenger
	// will simply be turned off, but it does mean that scavengeReservationShards,
	// in concert with pallocChunkBytes, dictates the minimum heap size at which
	// the scavenger triggers. In practice this minimum is generally less than an
	// arena in size, so virtually every heap has the scavenger on.
	p.scav.reservationBytes = alignUp(p.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
	p.scav.gen++
	atomic.Storeuintptr(&p.scav.released, 0)
	p.scav.freeHWM = minOffAddr
	p.scav.scavLWM = maxOffAddr
	unlock(&p.scav.lock)
}

// scavengeReserve reserves a contiguous range of the address space
// for scavenging. The maximum amount of space it reserves is proportional
// to the size of the heap. The ranges are reserved from the high addresses
// first.
//
// Returns the reserved range and the scavenge generation number for it.
func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
	lock(&p.scav.lock)
	gen := p.scav.gen

	// Start by reserving the minimum.
	r := p.scav.inUse.removeLast(p.scav.reservationBytes)

	// Return early if the size is zero; we don't want to use
	// the bogus address below.
	if r.size() == 0 {
		unlock(&p.scav.lock)
		return r, gen
	}

	// The scavenger requires that base be aligned to a
	// palloc chunk because that's the unit of operation for
	// the scavenger, so align down, potentially extending
	// the range.
	newBase := alignDown(r.base.addr(), pallocChunkBytes)

	// Remove from inUse however much extra we just pulled out.
	p.scav.inUse.removeGreaterEqual(newBase)
	unlock(&p.scav.lock)

	r.base = offAddr{newBase}
	return r, gen
}

// scavengeUnreserve returns an unscavenged portion of a range that was
// previously reserved with scavengeReserve.
func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
	if r.size() == 0 {
		return
	}
	if r.base.addr()%pallocChunkBytes != 0 {
		throw("unreserving unaligned region")
	}
	lock(&p.scav.lock)
	if gen == p.scav.gen {
		p.scav.inUse.add(r)
	}
	unlock(&p.scav.lock)
}

// scavengeOne walks over address range work until it finds
// a contiguous run of pages to scavenge. It will try to scavenge
// at most max bytes at once, but may scavenge more to avoid
// breaking huge pages. Once it scavenges some memory it returns
// how much it scavenged in bytes.
//
// Returns the number of bytes scavenged and the part of work
// which was not yet searched.
//
// work's base address must be aligned to pallocChunkBytes.
//
// Must run on the systemstack because it acquires p.mheapLock.
//
//go:systemstack
func (p *pageAlloc) scavengeOne(work addrRange, max uintptr) (uintptr, addrRange) {
	// Defensively check if we've received an empty address range.
	// If so, just return.
	if work.size() == 0 {
		// Nothing to do.
		return 0, work
	}
	// Check the prerequisites of work.
	if work.base.addr()%pallocChunkBytes != 0 {
		throw("scavengeOne called with unaligned work region")
	}
	// Calculate the maximum number of pages to scavenge.
	//
	// This should be alignUp(max, pageSize) / pageSize but max can and will
	// be ^uintptr(0), so we need to be very careful not to overflow here.
	// Rather than use alignUp, calculate the number of pages rounded down
	// first, then add back one if necessary.
	maxPages := max / pageSize
	if max%pageSize != 0 {
		maxPages++
	}

	// Calculate the minimum number of pages we can scavenge.
	//
	// Because we can only scavenge whole physical pages, we must
	// ensure that we scavenge at least minPages each time, aligned
	// to minPages*pageSize.
	minPages := physPageSize / pageSize
	if minPages < 1 {
		minPages = 1
	}
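	// For example, with 64 KiB physical pages and 8 KiB runtime pages,
	// minPages is 8; with 4 KiB physical pages it is clamped up to 1.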

	// Fast path: check the chunk containing the top-most address in work.
	if r, w := p.scavengeOneFast(work, minPages, maxPages); r != 0 {
		return r, w
	} else {
		work = w
	}

	// findCandidate finds the next scavenge candidate in work optimistically.
	//
	// Returns the candidate chunk index and true on success, and false on failure.
	//
	// The heap need not be locked.
	findCandidate := func(work addrRange) (chunkIdx, bool) {
		// Iterate over this work's chunks.
		for i := chunkIndex(work.limit.addr() - 1); i >= chunkIndex(work.base.addr()); i-- {
			// If this chunk is totally in-use or has no unscavenged pages, don't bother
			// doing a more sophisticated check.
			//
			// Note we're accessing the summary and the chunks without a lock, but
			// that's fine. We're being optimistic anyway.

			// Check quickly if there are enough free pages at all.
			if p.summary[len(p.summary)-1][i].max() < uint(minPages) {
				continue
			}

			// Run over the chunk looking harder for a candidate. Again, we could
			// race with a lot of different pieces of code, but we're just being
			// optimistic. Make sure we load the l2 pointer atomically though, to
			// avoid races with heap growth. It may or may not be possible to also
			// see a nil pointer in this case if we do race with heap growth, but
			// just defensively ignore the nils. This operation is optimistic anyway.
			l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&p.chunks[i.l1()])))
			if l2 != nil && l2[i.l2()].hasScavengeCandidate(minPages) {
				return i, true
			}
		}
		return 0, false
	}

	// Slow path: iterate optimistically over the in-use address space
	// looking for any free and unscavenged page. If we think we see something,
	// lock and verify it!
	for work.size() != 0 {

		// Search for the candidate.
		candidateChunkIdx, ok := findCandidate(work)
		if !ok {
			// We didn't find a candidate, so we're done.
			work.limit = work.base
			break
		}

		// Lock, so we can verify what we found.
		lock(p.mheapLock)

		// Find, verify, and scavenge if we can.
		chunk := p.chunkOf(candidateChunkIdx)
		base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
		if npages > 0 {
			work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}
			unlock(p.mheapLock)
			return uintptr(npages) * pageSize, work
		}
		unlock(p.mheapLock)

		// We were fooled, so let's continue from where we left off.
		work.limit = offAddr{chunkBase(candidateChunkIdx)}
	}
	return 0, work
}

// scavengeOneFast is the fast path for scavengeOne, which just checks the top
// chunk of work for some pages to scavenge.
//
// Must run on the system stack because it acquires the heap lock.
//
//go:systemstack
func (p *pageAlloc) scavengeOneFast(work addrRange, minPages, maxPages uintptr) (uintptr, addrRange) {
	maxAddr := work.limit.addr() - 1
	maxChunk := chunkIndex(maxAddr)

	lock(p.mheapLock)
	if p.summary[len(p.summary)-1][maxChunk].max() >= uint(minPages) {
		// We only bother looking for a candidate if there are at least
		// minPages free pages at all.
		base, npages := p.chunkOf(maxChunk).findScavengeCandidate(chunkPageIndex(maxAddr), minPages, maxPages)

		// If we found something, scavenge it and return!
		if npages != 0 {
			work.limit = offAddr{p.scavengeRangeLocked(maxChunk, base, npages)}
			unlock(p.mheapLock)
			return uintptr(npages) * pageSize, work
		}
	}
	unlock(p.mheapLock)

	// Update the limit to reflect the fact that we checked maxChunk already.
	work.limit = offAddr{chunkBase(maxChunk)}
	return 0, work
}

// scavengeRangeLocked scavenges the given region of memory.
// The region of memory is described by its chunk index (ci),
// the starting page index of the region relative to that
// chunk (base), and the length of the region in pages (npages).
//
// Returns the base address of the scavenged region.
//
// p.mheapLock must be held. Unlocks p.mheapLock but reacquires
// it before returning. Must be run on the systemstack as a result.
//
//go:systemstack
func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
	assertLockHeld(p.mheapLock)

	// Compute the full address for the start of the range.
	addr := chunkBase(ci) + uintptr(base)*pageSize

	// Mark the range we're about to scavenge as allocated, because
	// we don't want any allocating goroutines to grab it while
	// the scavenging is in progress.
	if scav := p.allocRange(addr, uintptr(npages)); scav != 0 {
		throw("double scavenge")
	}

	// With that done, it's safe to unlock.
	unlock(p.mheapLock)

	// Update the scavenge low watermark.
	lock(&p.scav.lock)
	if oAddr := (offAddr{addr}); oAddr.lessThan(p.scav.scavLWM) {
		p.scav.scavLWM = oAddr
	}
	unlock(&p.scav.lock)

	if !p.test {
		// Only perform the actual scavenging if we're not in a test.
		// It's dangerous to do so otherwise.
		sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)

		// Update global accounting only when not in test, otherwise
		// the runtime's accounting will be wrong.
		nbytes := int64(npages) * pageSize
		atomic.Xadd64(&memstats.heap_released, nbytes)

		// Update consistent accounting too.
		stats := memstats.heapStats.acquire()
		atomic.Xaddint64(&stats.committed, -nbytes)
		atomic.Xaddint64(&stats.released, nbytes)
		memstats.heapStats.release()
	}

	// Relock the heap, because now we need to make these pages
	// available for allocation. Free them back to the page allocator.
	lock(p.mheapLock)
	p.free(addr, uintptr(npages), true)

	// Mark the range as scavenged.
	p.chunkOf(ci).scavenged.setRange(base, npages)
	return addr
}

// fillAligned returns x but with all zeroes in m-aligned
// groups of m bits set to 1 if any bit in the group is non-zero.
//
// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
//
// Note that if m == 1, this is a no-op.
//
// m must be a power of 2 <= maxPagesPerPhysPage.
func fillAligned(x uint64, m uint) uint64 {
	apply := func(x uint64, c uint64) uint64 {
		// The technique used here is derived from
		// https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
		// and extended for more than just bytes (like nibbles
		// and uint16s) by using an appropriate constant.
		//
		// To summarize the technique, quoting from that page:
		// "[It] works by first zeroing the high bits of the [8]
		// bytes in the word. Subsequently, it adds a number that
		// will result in an overflow to the high bit of a byte if
		// any of the low bits were initially set. Next the high
		// bits of the original word are ORed with these values;
		// thus, the high bit of a byte is set iff any bit in the
		// byte was set. Finally, we determine if any of these high
		// bits are zero by ORing with ones everywhere except the
		// high bits and inverting the result."
		return ^((((x & c) + c) | x) | c)
	}
	// Transform x to contain a 1 bit at the top of each m-aligned
	// group of m zero bits.
	switch m {
	case 1:
		return x
	case 2:
		x = apply(x, 0x5555555555555555)
	case 4:
		x = apply(x, 0x7777777777777777)
	case 8:
		x = apply(x, 0x7f7f7f7f7f7f7f7f)
	case 16:
		x = apply(x, 0x7fff7fff7fff7fff)
	case 32:
		x = apply(x, 0x7fffffff7fffffff)
	case 64: // == maxPagesPerPhysPage
		x = apply(x, 0x7fffffffffffffff)
	default:
		throw("bad m value")
	}
	// Now, the top bit of each m-aligned group in x is set
	// iff that group was all zero in the original x.

	// From each group of m bits subtract 1.
	// Because we know only the top bits of each
	// m-aligned group are set, we know this will
	// set each group to have all the bits set except
	// the top bit, so just OR with the original
	// result to set all the bits.
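	// For a single 8-bit group (m = 8): 0x80, marking a group that was
	// originally all zero, becomes ^((0x80-0x01)|0x80) = 0x00, while 0x00,
	// marking a group that had some non-zero bit, becomes ^0x00 = 0xff.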
	return ^((x - (x >> (m - 1))) | x)
}

// hasScavengeCandidate returns true if there are any min-page-aligned groups of
// min pages of free-and-unscavenged memory in the region represented by this
// pallocData.
//
// min must be a non-zero power of 2 <= maxPagesPerPhysPage.
func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
	if min&(min-1) != 0 || min == 0 {
		print("runtime: min = ", min, "\n")
		throw("min must be a non-zero power of 2")
	} else if min > maxPagesPerPhysPage {
		print("runtime: min = ", min, "\n")
		throw("min too large")
	}

	// The goal of this search is to see if the chunk contains any free and unscavenged memory.
	for i := len(m.scavenged) - 1; i >= 0; i-- {
		// 1s are scavenged OR non-free => 0s are unscavenged AND free
		//
		// TODO(mknyszek): Consider splitting up fillAligned into two
		// functions, since here we technically could get by with just
		// the first half of its computation. It'll save a few instructions
		// but adds some additional code complexity.
		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))

		// Quickly skip over chunks of non-free or scavenged pages.
		if x != ^uint64(0) {
			return true
		}
	}
	return false
}

// findScavengeCandidate returns a start index and a size for this pallocData
// segment which represents a contiguous region of free and unscavenged memory.
//
// searchIdx indicates the page index within this chunk to start the search, but
// note that findScavengeCandidate searches backwards through the pallocData. As
// a result, it will return the highest scavenge candidate in address order.
//
// min indicates a hard minimum size and alignment for runs of pages. That is,
// findScavengeCandidate will not return a region smaller than min pages in size,
// or that is min pages or greater in size but not aligned to min. min must be
// a non-zero power of 2 <= maxPagesPerPhysPage.
//
// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
// findScavengeCandidate effectively returns entire free and unscavenged regions.
// If max < pallocChunkPages, it may truncate the returned region such that size is
// max. However, findScavengeCandidate may still return a larger region if, for
// example, it chooses to preserve huge pages, or if max is not aligned to min (it
// will round up). That is, even if max is small, the returned size is not guaranteed
// to be equal to max. max is allowed to be less than min, in which case it is as if
// max == min.
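//
// For example, with min = 8 the bitmap is effectively coarsened to 8-page
// granularity: only 8-aligned groups of 8 pages that are entirely free and
// unscavenged can contribute to a candidate, so a free run that does not
// cover any such group is never returned.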
func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	if min&(min-1) != 0 || min == 0 {
		print("runtime: min = ", min, "\n")
		throw("min must be a non-zero power of 2")
	} else if min > maxPagesPerPhysPage {
		print("runtime: min = ", min, "\n")
		throw("min too large")
	}
	// max may not be min-aligned, so we might accidentally truncate to
	// a max value which causes us to return a non-min-aligned value.
	// To prevent this, align max up to a multiple of min (which is always
	// a power of 2). This also prevents max from ever being less than
	// min, unless it's zero, so handle that explicitly.
	if max == 0 {
		max = min
	} else {
		max = alignUp(max, min)
	}

	i := int(searchIdx / 64)
	// Start by quickly skipping over blocks of non-free or scavenged pages.
	for ; i >= 0; i-- {
		// 1s are scavenged OR non-free => 0s are unscavenged AND free
		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
		if x != ^uint64(0) {
			break
		}
	}
	if i < 0 {
		// Failed to find any free/unscavenged pages.
		return 0, 0
	}
	// We have something in the 64-bit chunk at i, but it could
	// extend further. Loop until we find the extent of it.

	// 1s are scavenged OR non-free => 0s are unscavenged AND free
	x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
	z1 := uint(sys.LeadingZeros64(^x))
	run, end := uint(0), uint(i)*64+(64-z1)
	if x<<z1 != 0 {
		// After shifting out z1 bits, we still have 1s,
		// so the run ends inside this word.
		run = uint(sys.LeadingZeros64(x << z1))
	} else {
		// After shifting out z1 bits, we have no more 1s.
		// This means the run extends to the bottom of the
		// word so it may extend into further words.
		run = 64 - z1
		for j := i - 1; j >= 0; j-- {
			x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
			run += uint(sys.LeadingZeros64(x))
			if x != 0 {
				// The run stopped in this word.
				break
			}
		}
	}

	// Split the run we found if it's larger than max but hold on to
	// our original length, since we may need it later.
	size := run
	if size > uint(max) {
		size = uint(max)
	}
	start := end - size

	// Each huge page is guaranteed to fit in a single palloc chunk.
	//
	// TODO(mknyszek): Support larger huge page sizes.
	// TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
	// so we can write tests for this.
	if physHugePageSize > pageSize && physHugePageSize > physPageSize {
		// We have huge pages, so let's ensure we don't break one by scavenging
		// over a huge page boundary. If the range [start, start+size) overlaps with
		// a free-and-unscavenged huge page, we want to grow the region we scavenge
		// to include that huge page.

		// Compute the huge page boundary above our candidate.
		pagesPerHugePage := uintptr(physHugePageSize / pageSize)
		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))

		// If that boundary is within our current candidate, then we may be breaking
		// a huge page.
		if hugePageAbove <= end {
			// Compute the huge page boundary below our candidate.
			hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))

			if hugePageBelow >= end-run {
				// We're in danger of breaking apart a huge page since start+size crosses
				// a huge page boundary and rounding down start to the nearest huge
				// page boundary is included in the full run we found. Include the entire
				// huge page in the bound by rounding down to the huge page size.
				size = size + (start - hugePageBelow)
				start = hugePageBelow
			}
		}
	}
	return start, size
}