// Source file src/sync/mutex.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package sync provides basic synchronization primitives such as mutual
     6  // exclusion locks. Other than the Once and WaitGroup types, most are intended
     7  // for use by low-level library routines. Higher-level synchronization is
     8  // better done via channels and communication.
     9  //
    10  // Values containing the types defined in this package should not be copied.
    11  package sync
    12  
    13  import (
    14  	"internal/race"
    15  	"sync/atomic"
    16  	"unsafe"
    17  )
    18  
// throw reports a fatal error with the given message; the implementation
// is supplied by the runtime package, not by Go code in this package.
func throw(string) // provided by runtime
    20  
// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
type Mutex struct {
	// state packs the mutexLocked/mutexWoken/mutexStarving flag bits in
	// its low bits and the count of blocked waiters in the remaining
	// high bits (see mutexWaiterShift).
	state int32
	// sema is the semaphore used to park and wake waiting goroutines
	// (via the runtime_SemacquireMutex/runtime_Semrelease hooks).
	sema uint32
}
    29  
// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	// Lock acquires the lock.
	Lock()
	// Unlock releases the lock.
	Unlock()
}
    35  
const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken              // a goroutine has been woken, or is actively spinning in lockSlow
	mutexStarving           // mutex is in starvation mode (direct handoff to waiters)
	// The bits above are flags; all higher bits of Mutex.state hold the
	// waiter count, read as state >> mutexWaiterShift.
	mutexWaiterShift = iota

	// Mutex fairness.
	//
	// Mutex can be in 2 modes of operations: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken up waiter
	// does not own the mutex and competes with new arriving goroutines over
	// the ownership. New arriving goroutines have an advantage -- they are
	// already running on CPU and there can be lots of them, so a woken up
	// waiter has good chances of losing. In such case it is queued at front
	// of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
	// it switches mutex to the starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// New arriving goroutines don't try to acquire the mutex even if it appears
	// to be unlocked, and don't try to spin. Instead they queue themselves at
	// the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
	// it switches mutex back to normal operation mode.
	//
	// Normal mode has considerably better performance as a goroutine can acquire
	// a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.
	//
	// starvationThresholdNs is that 1ms threshold, in nanoseconds.
	starvationThresholdNs = 1e6
)
    68  
    69  // Lock locks m.
    70  // If the lock is already in use, the calling goroutine
    71  // blocks until the mutex is available.
    72  func (m *Mutex) Lock() {
    73  	// Fast path: grab unlocked mutex.
    74  	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
    75  		if race.Enabled {
    76  			race.Acquire(unsafe.Pointer(m))
    77  		}
    78  		return
    79  	}
    80  	// Slow path (outlined so that the fast path can be inlined)
    81  	m.lockSlow()
    82  }
    83  
    84  // TryLock tries to lock m and reports whether it succeeded.
    85  //
    86  // Note that while correct uses of TryLock do exist, they are rare,
    87  // and use of TryLock is often a sign of a deeper problem
    88  // in a particular use of mutexes.
    89  func (m *Mutex) TryLock() bool {
    90  	old := m.state
    91  	if old&(mutexLocked|mutexStarving) != 0 {
    92  		return false
    93  	}
    94  
    95  	// There may be a goroutine waiting for the mutex, but we are
    96  	// running now and can try to grab the mutex before that
    97  	// goroutine wakes up.
    98  	if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
    99  		return false
   100  	}
   101  
   102  	if race.Enabled {
   103  		race.Acquire(unsafe.Pointer(m))
   104  	}
   105  	return true
   106  }
   107  
// lockSlow is the contended path of Lock, outlined so that the fast path
// in Lock can be inlined. It spins while that looks profitable, otherwise
// parks on m.sema, and implements the normal/starvation mode switching
// described in the fairness comment on the const block above.
func (m *Mutex) lockSlow() {
	var waitStartTime int64 // 0 until we first park; used to measure wait time
	starving := false       // this goroutine wants to switch the mutex to starvation mode
	awoke := false          // this goroutine currently owns the mutexWoken claim
	iter := 0               // consecutive spin iterations; reset after each wakeup
	old := m.state
	for {
		// Don't spin in starvation mode, ownership is handed off to waiters
		// so we won't be able to acquire the mutex anyway.
		if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
			// Active spinning makes sense.
			// Try to set mutexWoken flag to inform Unlock
			// to not wake other blocked goroutines.
			if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
				atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
				awoke = true
			}
			runtime_doSpin()
			iter++
			old = m.state
			continue
		}
		// Compute the state we want to CAS in, starting from old.
		new := old
		// Don't try to acquire starving mutex, new arriving goroutines must queue.
		if old&mutexStarving == 0 {
			new |= mutexLocked
		}
		// If the mutex is held or starving we are going to block, so
		// account for ourselves as a waiter.
		if old&(mutexLocked|mutexStarving) != 0 {
			new += 1 << mutexWaiterShift
		}
		// The current goroutine switches mutex to starvation mode.
		// But if the mutex is currently unlocked, don't do the switch.
		// Unlock expects that starving mutex has waiters, which will not
		// be true in this case.
		if starving && old&mutexLocked != 0 {
			new |= mutexStarving
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				throw("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&(mutexLocked|mutexStarving) == 0 {
				break // locked the mutex with CAS
			}
			// If we were already waiting before, queue at the front of the queue.
			queueLifo := waitStartTime != 0
			if waitStartTime == 0 {
				waitStartTime = runtime_nanotime()
			}
			runtime_SemacquireMutex(&m.sema, queueLifo, 1)
			// Waiting longer than the threshold makes us request
			// starvation mode on the next loop iteration.
			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
			old = m.state
			if old&mutexStarving != 0 {
				// If this goroutine was woken and mutex is in starvation mode,
				// ownership was handed off to us but mutex is in somewhat
				// inconsistent state: mutexLocked is not set and we are still
				// accounted as waiter. Fix that.
				if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
					throw("sync: inconsistent mutex state")
				}
				// Acquire the lock bit and drop our waiter slot in a
				// single atomic add.
				delta := int32(mutexLocked - 1<<mutexWaiterShift)
				if !starving || old>>mutexWaiterShift == 1 {
					// Exit starvation mode.
					// Critical to do it here and consider wait time.
					// Starvation mode is so inefficient, that two goroutines
					// can go lock-step infinitely once they switch mutex
					// to starvation mode.
					delta -= mutexStarving
				}
				atomic.AddInt32(&m.state, delta)
				break
			}
			awoke = true
			iter = 0
		} else {
			// CAS lost a race with a concurrent state change; reload and retry.
			old = m.state
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}
   196  
   197  // Unlock unlocks m.
   198  // It is a run-time error if m is not locked on entry to Unlock.
   199  //
   200  // A locked Mutex is not associated with a particular goroutine.
   201  // It is allowed for one goroutine to lock a Mutex and then
   202  // arrange for another goroutine to unlock it.
   203  func (m *Mutex) Unlock() {
   204  	if race.Enabled {
   205  		_ = m.state
   206  		race.Release(unsafe.Pointer(m))
   207  	}
   208  
   209  	// Fast path: drop lock bit.
   210  	new := atomic.AddInt32(&m.state, -mutexLocked)
   211  	if new != 0 {
   212  		// Outlined slow path to allow inlining the fast path.
   213  		// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
   214  		m.unlockSlow(new)
   215  	}
   216  }
   217  
// unlockSlow is the contended path of Unlock, outlined so that the fast
// path can be inlined. new is the state value produced by the fast path
// after it dropped the mutexLocked bit.
func (m *Mutex) unlockSlow(new int32) {
	// Re-adding mutexLocked reconstructs the state as it was before
	// Unlock's atomic add; if the lock bit is still clear, the mutex
	// was never locked.
	if (new+mutexLocked)&mutexLocked == 0 {
		throw("sync: unlock of unlocked mutex")
	}
	if new&mutexStarving == 0 {
		// Normal mode: try to wake exactly one waiter, if needed.
		old := new
		for {
			// If there are no waiters or a goroutine has already
			// been woken or grabbed the lock, no need to wake anyone.
			// In starvation mode ownership is directly handed off from unlocking
			// goroutine to the next waiter. We are not part of this chain,
			// since we did not observe mutexStarving when we unlocked the mutex above.
			// So get off the way.
			if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
				return
			}
			// Grab the right to wake someone.
			new = (old - 1<<mutexWaiterShift) | mutexWoken
			if atomic.CompareAndSwapInt32(&m.state, old, new) {
				runtime_Semrelease(&m.sema, false, 1)
				return
			}
			// CAS lost a race; reload the state and retry.
			old = m.state
		}
	} else {
		// Starving mode: handoff mutex ownership to the next waiter, and yield
		// our time slice so that the next waiter can start to run immediately.
		// Note: mutexLocked is not set, the waiter will set it after wakeup.
		// But mutex is still considered locked if mutexStarving is set,
		// so new coming goroutines won't acquire it.
		runtime_Semrelease(&m.sema, true, 1)
	}
}
   251  
