Source file src/sync/pool_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so none of these tests work.
//go:build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	"sort"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}

func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() any {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}
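
// Editorial sketch, not part of the upstream test file: a compact restatement
// of the New-callback behavior exercised by TestPoolNew above. The test name
// and the string values are invented for illustration. As in the tests above,
// GC is disabled and the goroutine is pinned so the Put value is seen by the
// following Get; once the pool is empty, Get falls back to New.
func TestPoolNewSketch(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	p := Pool{New: func() any { return "fresh" }}

	// Pin to one P so the Put value is observed by the next Get.
	Runtime_procPin()
	p.Put("reused")
	if v := p.Get(); v != "reused" {
		t.Fatalf("got %v; want reused", v)
	}
	Runtime_procUnpin()

	// The pool is now empty, so Get must consult New.
	if v := p.Get(); v != "fresh" {
		t.Fatalf("got %v; want fresh", v)
	}
}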

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// One pointer can remain live on a stack or elsewhere, so accept N-1 finalizations.
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}

func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v any = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expected 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}
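
// Editorial sketch, not part of the upstream file: the contract stressed by
// testPoolDequeue above is a single producer operating on the head
// (PushHead/PopHead) with any number of consumers taking from the tail
// (PopTail). The test name and the values used here are invented for
// illustration; it only spells out the single-element behavior.
func TestPoolDequeueSketch(t *testing.T) {
	d := NewPoolDequeue(16) // fixed-size ring, as in TestPoolDequeue above
	if !d.PushHead(1) {
		t.Fatal("PushHead failed on an empty dequeue")
	}
	if v, ok := d.PopTail(); !ok || v.(int) != 1 {
		t.Fatalf("PopTail = %v, %v; want 1, true", v, ok)
	}
	if _, ok := d.PopHead(); ok {
		t.Fatal("PopHead succeeded on an empty dequeue")
	}
}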

func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}

// Simulate object starvation in order to force Ps to steal objects
// from other Ps.
func BenchmarkPoolStarvation(b *testing.B) {
	var p Pool
	count := 100
	// Reduce the number of objects that are Put by 33%. This creates object
	// starvation and forces P-local storage to steal objects from other Ps.
	countStarved := count - int(float32(count)*0.33)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < countStarved; b++ {
				p.Put(1)
			}
			for b := 0; b < count; b++ {
				p.Get()
			}
		}
	})
}

var globalSink any

func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item any = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record the pause time. PauseNs is a circular buffer of recent
		// stop-the-world pause times; (NumGC+255)%256 indexes the most
		// recent entry.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}

func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() any {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]any, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}