Source file src/runtime/testdata/testprog/gc.go

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package main
     6  
     7  import (
     8  	"fmt"
     9  	"os"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"sync/atomic"
    13  	"time"
    14  	"unsafe"
    15  )
    16  
    17  func init() {
    18  	register("GCFairness", GCFairness)
    19  	register("GCFairness2", GCFairness2)
    20  	register("GCSys", GCSys)
    21  	register("GCPhys", GCPhys)
    22  	register("DeferLiveness", DeferLiveness)
    23  	register("GCZombie", GCZombie)
    24  }
    25  
    26  func GCSys() {
    27  	runtime.GOMAXPROCS(1)
    28  	memstats := new(runtime.MemStats)
    29  	runtime.GC()
    30  	runtime.ReadMemStats(memstats)
    31  	sys := memstats.Sys
    32  
    33  	runtime.MemProfileRate = 0 // disable profiler
    34  
    35  	itercount := 100000
    36  	for i := 0; i < itercount; i++ {
    37  		workthegc()
    38  	}
    39  
    40  	// Should only be using a few MB.
    41  	// We allocated 100 MB or (if not short) 1 GB.
    42  	runtime.ReadMemStats(memstats)
    43  	if sys > memstats.Sys {
    44  		sys = 0
    45  	} else {
    46  		sys = memstats.Sys - sys
    47  	}
    48  	if sys > 16<<20 {
    49  		fmt.Printf("using too much memory: %d bytes\n", sys)
    50  		return
    51  	}
    52  	fmt.Printf("OK\n")
    53  }
    54  
    55  var sink []byte
    56  
    57  func workthegc() []byte {
    58  	sink = make([]byte, 1029)
    59  	return sink
    60  }
    61  
    62  func GCFairness() {
    63  	runtime.GOMAXPROCS(1)
    64  	f, err := os.Open("/dev/null")
    65  	if os.IsNotExist(err) {
    66  		// This test tests what it is intended to test only if writes are fast.
    67  		// If there is no /dev/null, we just don't execute the test.
    68  		fmt.Println("OK")
    69  		return
    70  	}
    71  	if err != nil {
    72  		fmt.Println(err)
    73  		os.Exit(1)
    74  	}
    75  	for i := 0; i < 2; i++ {
    76  		go func() {
    77  			for {
    78  				f.Write([]byte("."))
    79  			}
    80  		}()
    81  	}
    82  	time.Sleep(10 * time.Millisecond)
    83  	fmt.Println("OK")
    84  }
    85  
    86  func GCFairness2() {
    87  	// Make sure user code can't exploit the GC's high priority
    88  	// scheduling to make scheduling of user code unfair. See
    89  	// issue #15706.
    90  	runtime.GOMAXPROCS(1)
    91  	debug.SetGCPercent(1)
    92  	var count [3]int64
    93  	var sink [3]any
    94  	for i := range count {
    95  		go func(i int) {
    96  			for {
    97  				sink[i] = make([]byte, 1024)
    98  				atomic.AddInt64(&count[i], 1)
    99  			}
   100  		}(i)
   101  	}
   102  	// Note: If the unfairness is really bad, it may not even get
   103  	// past the sleep.
   104  	//
   105  	// If the scheduling rules change, this may not be enough time
   106  	// to let all goroutines run, but for now we cycle through
   107  	// them rapidly.
   108  	//
   109  	// OpenBSD's scheduler makes every usleep() take at least
   110  	// 20ms, so we need a long time to ensure all goroutines have
   111  	// run. If they haven't run after 30ms, give it another 1000ms
   112  	// and check again.
   113  	time.Sleep(30 * time.Millisecond)
   114  	var fail bool
   115  	for i := range count {
   116  		if atomic.LoadInt64(&count[i]) == 0 {
   117  			fail = true
   118  		}
   119  	}
   120  	if fail {
   121  		time.Sleep(1 * time.Second)
   122  		for i := range count {
   123  			if atomic.LoadInt64(&count[i]) == 0 {
   124  				fmt.Printf("goroutine %d did not run\n", i)
   125  				return
   126  			}
   127  		}
   128  	}
   129  	fmt.Println("OK")
   130  }
   131  
   132  func GCPhys() {
   133  	// This test ensures that heap-growth scavenging is working as intended.
   134  	//
   135  	// It attempts to construct a sizeable "swiss cheese" heap, with many
   136  	// allocChunk-sized holes. Then, it triggers a heap growth by trying to
   137  	// allocate as much memory as would fit in those holes.
   138  	//
   139  	// The heap growth should cause a large number of those holes to be
   140  	// returned to the OS.
   141  
   142  	const (
   143  		// The total amount of memory we're willing to allocate.
   144  		allocTotal = 32 << 20
   145  
   146  		// The page cache could hide 64 8-KiB pages from the scavenger today.
   147  		maxPageCache = (8 << 10) * 64
   148  	)
   149  
   150  	// How big the allocations are needs to depend on the page size.
   151  	// If the page size is too big and the allocations are too small,
   152  	// they might not be aligned to the physical page size, so the scavenger
   153  	// will gloss over them.
   154  	pageSize := os.Getpagesize()
   155  	var allocChunk int
   156  	if pageSize <= 8<<10 {
   157  		allocChunk = 64 << 10
   158  	} else {
   159  		allocChunk = 512 << 10
   160  	}
   161  	allocs := allocTotal / allocChunk
   162  
   163  	// Set GC percent just so this test is a little more consistent in the
   164  	// face of varying environments.
   165  	debug.SetGCPercent(100)
   166  
   167  	// Set GOMAXPROCS to 1 to minimize the amount of memory held in the page cache,
   168  	// and to reduce the chance that the background scavenger gets scheduled.
   169  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   170  
   171  	// Allocate allocTotal bytes of memory in allocChunk byte chunks.
   172  	// Alternate between whether the chunk will be held live or will be
   173  	// condemned to GC to create holes in the heap.
   174  	saved := make([][]byte, allocs/2+1)
   175  	condemned := make([][]byte, allocs/2)
   176  	for i := 0; i < allocs; i++ {
   177  		b := make([]byte, allocChunk)
   178  		if i%2 == 0 {
   179  			saved = append(saved, b)
   180  		} else {
   181  			condemned = append(condemned, b)
   182  		}
   183  	}
   184  
   185  	// Run a GC cycle just so we're at a consistent state.
   186  	runtime.GC()
   187  
   188  	// Drop the only reference to all the condemned memory.
   189  	condemned = nil
   190  
   191  	// Clear the condemned memory.
   192  	runtime.GC()
   193  
   194  	// At this point, the background scavenger is likely running
   195  	// and could pick up the work, so the next line of code doesn't
   196  	// end up doing anything. That's fine. What's important is that
   197  	// this test fails somewhat regularly if the runtime doesn't
   198  	// scavenge on heap growth, and doesn't fail at all otherwise.
   199  
   200  	// Make a large allocation that in theory could fit, but won't
   201  	// because we turned the heap into swiss cheese.
   202  	saved = append(saved, make([]byte, allocTotal/2))
   203  
   204  	// heapBacked is an estimate of the amount of physical memory used by
   205  	// this test. HeapSys is an estimate of the size of the mapped virtual
   206  	// address space (which may or may not be backed by physical pages)
   207  	// whereas HeapReleased is an estimate of the amount of bytes returned
   208  	// to the OS. Their difference then roughly corresponds to the amount
   209  	// of virtual address space that is backed by physical pages.
   210  	//
   211  	// heapBacked also subtracts out maxPageCache bytes of memory because
   212  	// this is memory that may be hidden from the scavenger per-P. Since
   213  	// GOMAXPROCS=1 here, subtracting it out once is fine.
   214  	var stats runtime.MemStats
   215  	runtime.ReadMemStats(&stats)
   216  	heapBacked := stats.HeapSys - stats.HeapReleased - maxPageCache
   217  	// If heapBacked does not exceed the heap goal by more than retainExtraPercent
   218  	// then the scavenger is working as expected; the newly-created holes have been
   219  	// scavenged immediately as part of the allocations which cannot fit in the holes.
   220  	//
   221  	// Since the runtime should scavenge the entirety of the remaining holes,
   222  	// theoretically there should be no more free and unscavenged memory. However due
   223  	// to other allocations that happen during this test we may still see some physical
   224  	// memory over-use.
   225  	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
   226  	// Check against our overuse threshold, which is what the scavenger always reserves
   227  	// to encourage allocation of memory that doesn't need to be faulted in.
   228  	//
   229  	// Add additional slack in case the page size is large and the scavenger
   230  	// can't reach that memory because it doesn't constitute a complete aligned
   231  	// physical page. Assume the worst case: a full physical page out of each
   232  	// allocation.
   233  	threshold := 0.1 + float64(pageSize)/float64(allocChunk)
   234  	if overuse <= threshold {
   235  		fmt.Println("OK")
   236  		return
   237  	}
   238  	// Physical memory utilization exceeds the threshold, so heap-growth scavenging
   239  	// did not operate as expected.
   240  	//
   241  	// In the context of this test, this indicates a large amount of
   242  	// fragmentation with physical pages that are otherwise unused but not
   243  	// returned to the OS.
   244  	fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+
   245  		"(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100,
   246  		stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved))
   247  	runtime.KeepAlive(saved)
   248  	runtime.KeepAlive(condemned)
   249  }
   250  
// Test that defer closure is correctly scanned when the stack is scanned.
//
// The deferred closure captures x; if the GC's stack scan failed to mark
// the defer's closure, x could be collected or clobbered before the
// deferred check runs, and the x[0] != 42 test would panic("FAIL").
func DeferLiveness() {
	var x [10]int
	// Hand &x to a noinline helper so the compiler cannot prove x never
	// escapes (keeps the test meaningful under optimization).
	escape(&x)
	fn := func() {
		if x[0] != 42 {
			panic("FAIL")
		}
	}
	defer fn()

	// Store the sentinel, then trigger several collections (and the
	// stack scans that come with them) before the deferred check fires.
	x[0] = 42
	runtime.GC()
	runtime.GC()
	runtime.GC()
}
   267  
   268  //go:noinline
   269  func escape(x any) { sink2 = x; sink2 = nil }
   270  
   271  var sink2 any
   272  
// Test zombie object detection and reporting.
//
// A "zombie" is a reachable pointer to an object the GC has already
// freed. This function manufactures zombies on purpose: it hides half
// the allocated objects' addresses as uintptrs (which the GC does not
// trace), lets a collection free them, then converts the addresses back
// to live pointers. The subsequent GC is expected to report the zombie
// objects and abort the process; reaching the println below therefore
// means detection failed.
func GCZombie() {
	// Allocate several objects of unusual size (so free slots are
	// unlikely to all be re-allocated by the runtime).
	const size = 190
	const count = 8192 / size
	keep := make([]*byte, 0, (count+1)/2)
	free := make([]uintptr, 0, (count+1)/2)
	// NOTE(review): len(free) is 0 at this point, so this capacity hint
	// is a no-op — presumably (count+1)/2 was intended; harmless either way.
	zombies := make([]*byte, 0, len(free))
	for i := 0; i < count; i++ {
		obj := make([]byte, size)
		p := &obj[0]
		if i%2 == 0 {
			keep = append(keep, p)
		} else {
			// Store the address only as a uintptr so the object is
			// unreachable as far as the GC is concerned.
			free = append(free, uintptr(unsafe.Pointer(p)))
		}
	}

	// Free the unreferenced objects.
	runtime.GC()

	// Bring the free objects back to life.
	for _, p := range free {
		zombies = append(zombies, (*byte)(unsafe.Pointer(p)))
	}

	// GC should detect the zombie objects.
	runtime.GC()
	println("failed")
	runtime.KeepAlive(keep)
	runtime.KeepAlive(zombies)
}
   306  

View as plain text