Source file src/runtime/rwmutex_test.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// This is a copy of sync/rwmutex_test.go rewritten to test the
// runtime rwmutex.

package runtime_test

import (
	"fmt"
	. "runtime"
	"runtime/debug"
	"sync/atomic"
	"testing"
)

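// parallelReader takes the read lock, signals on clocked, then spins until
// cunlock is set before releasing the lock and signaling on cdone.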
func parallelReader(m *RWMutex, clocked chan bool, cunlock *uint32, cdone chan bool) {
	m.RLock()
	clocked <- true
	for atomic.LoadUint32(cunlock) == 0 {
	}
	m.RUnlock()
	cdone <- true
}

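// doTestParallelReaders checks that numReaders goroutines can hold the read
// lock at the same time.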
func doTestParallelReaders(numReaders int) {
	GOMAXPROCS(numReaders + 1)
	var m RWMutex
	clocked := make(chan bool, numReaders)
	var cunlock uint32
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, &cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	atomic.StoreUint32(&cunlock, 1)
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

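// TestParallelRWMutexReaders verifies that multiple readers can hold the
// read lock concurrently, for a few different reader counts.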
func TestParallelRWMutexReaders(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("wasm has no threads yet")
	}
	defer GOMAXPROCS(GOMAXPROCS(-1))
	// If the runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too.
	GC()

	doTestParallelReaders(1)
	doTestParallelReaders(3)
	doTestParallelReaders(4)
}

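// reader repeatedly takes the read lock and checks, via the shared activity
// counter, that no writer is inside its critical section at the same time.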
func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("rlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

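// writer repeatedly takes the write lock and checks, via the shared activity
// counter, that it has exclusive access while it holds the lock.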
func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

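// HammerRWMutex stress-tests one RWMutex with two writers and numReaders
// readers, each performing numIterations lock/unlock cycles.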
func HammerRWMutex(gomaxprocs, numReaders, numIterations int) {
	GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, numIterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	go writer(&rwm, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

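// TestRWMutex runs the hammer test across several combinations of
// GOMAXPROCS and reader counts.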
func TestRWMutex(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

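// BenchmarkRWMutexUncontended measures lock and unlock cost when every
// goroutine uses its own mutex. The pad field spaces the mutexes out so
// they do not share cache lines.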
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

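// benchmarkRWMutex measures a mixed workload on a single shared RWMutex:
// each goroutine takes the write lock once every writeRatio operations and
// the read lock otherwise, spinning for localWork iterations under the
// read lock.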
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

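// The wrappers below vary the write ratio (one write per 10 or per 100
// operations) and the amount of work done under the read lock (0 or 100
// iterations).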
func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}
