Source file src/runtime/lockrank_on.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.staticlockranking

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// worldIsStopped is accessed atomically to track world-stops. 1 == world
// stopped.
var worldIsStopped uint32

// lockRankStruct is embedded in mutex
type lockRankStruct struct {
	// static lock ranking of the lock
	rank lockRank
	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
	// 32-bit systems.
	pad int
}

func lockInit(l *mutex, rank lockRank) {
	l.rank = rank
}
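
// Illustrative sketch only (fooLock and lockRankFoo are hypothetical, not
// real runtime names): a subsystem assigns its lock a static rank once,
// during initialization, before the lock is ever acquired:
//
//	var fooLock mutex
//
//	func fooInit() {
//		lockInit(&fooLock, lockRankFoo)
//	}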

func getLockRank(l *mutex) lockRank {
	return l.rank
}

// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other non-nosplit call before this call (including the call to lock()
// itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. e.g., without systemstack, a stack
// split on entry to lock2() would record stack split locks as taken after l,
// even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk is only used for fatal throw/panic. Don't do lock
		// rank recording for it, since we throw after reporting a
		// lock ordering problem. Additionally, paniclk may be taken
		// after effectively any lock (anywhere we might panic), which
		// the partial order doesn't cover.
		lock2(l)
		return
	}
	if rank == 0 {
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new rank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired.
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}
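
// Illustrative sketch only (aLock and bLock are hypothetical): lock(l)
// forwards here, so nested acquisitions are checked against the rank order:
//
//	lock(&aLock)   // records aLock's rank in gp.m.locksHeld
//	lock(&bLock)   // checkRanks verifies bLock's rank may follow aLock's
//	unlock(&bLock)
//	unlock(&aLock)
//
// Acquiring the locks in the opposite order would throw with "lock ordering
// problem" unless the ranking permits bLock before aLock.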

// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func printHeldLocks(gp *g) {
	if gp.m.locksHeldLen == 0 {
		println("<none>")
		return
	}

	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(held.lockAddr))
	}
}

// acquireLockRank acquires a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new rank. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired.
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}

// checkRanks checks if goroutine g, which has most recently acquired a lock
// with rank 'prevRank', can now acquire a lock with rank 'rank'.
//
//go:systemstack
func checkRanks(gp *g, prevRank, rank lockRank) {
	rankOK := false
	if rank < prevRank {
		// If rank < prevRank, then we definitely have a rank error.
		rankOK = false
	} else if rank == lockRankLeafRank {
		// If the new lock is a leaf lock, then the preceding lock can
		// be anything except another leaf lock.
		rankOK = prevRank < lockRankLeafRank
	} else {
		// We've now verified the total lock ranking, but we also
		// enforce the partial ordering specified by lockPartialOrder.
		// Two locks with the same rank can only be acquired at the
		// same time if explicitly listed in the lockPartialOrder table.
		list := lockPartialOrder[rank]
		for _, entry := range list {
			if entry == prevRank {
				rankOK = true
				break
			}
		}
	}
	if !rankOK {
		printlock()
		println(gp.m.procid, " ======")
		printHeldLocks(gp)
		throw("lock ordering problem")
	}
}
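
// For orientation: lockPartialOrder (defined in lockrank.go) maps each rank
// to the ranks that may already be held when a lock of that rank is acquired.
// The entries below are made-up, illustrative only:
//
//	var lockPartialOrder [][]lockRank = [][]lockRank{
//		lockRankFoo: {},
//		lockRankBar: {lockRankFoo},
//		lockRankBaz: {lockRankFoo, lockRankBar, lockRankBaz}, // baz-after-baz explicitly allowed
//	}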

// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}

// releaseLockRank releases a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}
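
// Illustrative pairing (lockRankFoo is hypothetical): acquireLockRank and
// releaseLockRank bracket a critical region that has no mutex of its own but
// must still participate in the lock ordering:
//
//	acquireLockRank(lockRankFoo) // recorded with lockAddr == 0
//	// ... region ordered after any locks already held ...
//	releaseLockRank(lockRankFoo)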

// See comment on lockWithRank regarding stack splitting.
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibility of lock ordering problem if no other locks held.
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out the list, including this lock, if
		// there is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		gp.m.locksHeldLen--
	})
}
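
// Illustrative use (c is a hypothetical value with a mutex field, in the
// style of the channel code): record that a lock may be taken later, so the
// potential ordering edge is checked even if the lock is never acquired:
//
//	lockWithRankMayAcquire(&c.lock, getLockRank(&c.lock))
//	// ... possibly, later:
//	lock(&c.lock)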

// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func checkLockHeld(gp *g, l *mutex) bool {
	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
			return true
		}
	}
	return false
}

// assertLockHeld throws if l is not held by the caller.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertLockHeld(l *mutex) {
	gp := getg()

	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}
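
// Illustrative sketch (fooLocked and fooLock are hypothetical): a function
// documenting a locking precondition can enforce it at entry:
//
//	// fooLocked must only be called with fooLock held.
//	func fooLocked() {
//		assertLockHeld(&fooLock)
//		// ...
//	}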

// assertRankHeld throws if a mutex with rank r is not held by the caller.
//
// This is less precise than assertLockHeld, but can be used in places where a
// pointer to the exact mutex is not available.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertRankHeld(r lockRank) {
	gp := getg()

	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].rank == r {
			return
		}
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock with rank ", r.String(), ", holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}

// worldStopped notes that the world is stopped.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func worldStopped() {
	if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("recursive world stop")
		})
	}
}

// worldStarted notes that the world is starting.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func worldStarted() {
	if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("released non-stopped world stop")
		})
	}
}
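
// Simplified pairing for orientation; the real calls are made from the
// stop-the-world/start-the-world paths in proc.go:
//
//	worldStopped()  // once all Ps are halted
//	// ... state requiring a stopped world may be accessed ...
//	worldStarted()  // before Ps are allowed to resume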

// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func checkWorldStopped() bool {
	stopped := atomic.Load(&worldIsStopped)
	if stopped > 1 {
		systemstack(func() {
			print("inconsistent world stop count=", stopped, "\n")
			throw("inconsistent world stop count")
		})
	}

	return stopped == 1
}

// assertWorldStopped throws if the world is not stopped. It does not check
// which M stopped the world.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertWorldStopped() {
	if checkWorldStopped() {
		return
	}

	throw("world not stopped")
}

// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
// passed lock is not held.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
	if checkWorldStopped() {
		return
	}

	gp := getg()
	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		println("<no world stop>")
		printHeldLocks(gp)
		throw("no world stop or required lock!")
	})
}
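
// Illustrative sketch (fooCount and fooLock are hypothetical): suited to
// fields written either under a specific lock or during a world stop:
//
//	// fooCount is protected by fooLock, or by stopping the world.
//	func addFooCount(n int64) {
//		assertWorldStoppedOrLockHeld(&fooLock)
//		fooCount += n
//	}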
