// src/sync/rwmutex.go
package sync

import (
	"internal/race"
	"sync/atomic"
	"unsafe"
)

// A RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers or a single
// writer. The zero value for a RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might
// call Lock, no goroutine should expect to be able to acquire a read lock
// until the initial read lock is released. In particular, this prohibits
// recursive read locking. This is intended to ensure that the lock
// eventually becomes available; a blocked Lock call excludes new readers
// from acquiring the lock.
type RWMutex struct {
	w           Mutex  // held if there are pending writers
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers
	readerWait  int32  // number of departing readers
}

// rwmutexMaxReaders bounds the number of concurrent readers; a pending
// writer subtracts this value from readerCount to signal its presence.
const rwmutexMaxReaders = 1 << 30
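
// Note (added illustration, not part of the original file): readerCount does
// double duty. While non-negative it is simply the number of goroutines that
// hold or are acquiring the read lock. When a writer arrives it subtracts
// rwmutexMaxReaders, driving the value negative so that new readers in RLock
// observe a pending writer and block. A small worked example with three
// active readers:
//
//	// readerCount == 3: three readers hold the lock.
//	// A writer calls Lock:
//	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
//	// Now readerCount < 0, so new readers block; r == 3 is the number of
//	// readers the writer must wait for, recorded in readerWait.
//	// Each RUnlock decrements readerWait; the reader that drops it to 0
//	// releases writerSem and the writer proceeds.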

// Happens-before relationships are indicated to the race detector via:
// - Unlock  -> Lock:  readerSem
// - Unlock  -> RLock: readerSem
// - RUnlock -> Lock:  writerSem
//
// The methods below temporarily disable handling of race synchronization
// events in order to provide the more precise model above to the race
// detector.
//
// For example, atomic.AddInt32 in RLock should not appear to provide
// acquire-release semantics, which would incorrectly synchronize racing
// readers, thus potentially missing races.

// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on
// the RWMutex type.
func (rw *RWMutex) RLock() {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		runtime_SemacquireMutex(&rw.readerSem, false, 0)
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
	}
}
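
// Usage sketch (added illustration, not part of the original file): a
// reader-mostly map guarded by an RWMutex. The counters type and its methods
// are hypothetical, shown only to contrast RLock/RUnlock with Lock/Unlock.
//
//	type counters struct {
//		mu sync.RWMutex
//		m  map[string]int
//	}
//
//	func (c *counters) get(key string) int {
//		c.mu.RLock() // many readers may hold this concurrently
//		defer c.mu.RUnlock()
//		return c.m[key]
//	}
//
//	func (c *counters) inc(key string) {
//		c.mu.Lock() // writers get exclusive access
//		defer c.mu.Unlock()
//		c.m[key]++
//	}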

// TryRLock tries to lock rw for reading and reports whether it succeeded.
//
// Note that while correct uses of TryRLock do exist, they are rare,
// and use of TryRLock is often a sign of a deeper problem
// in a particular use of mutexes.
func (rw *RWMutex) TryRLock() bool {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	for {
		c := atomic.LoadInt32(&rw.readerCount)
		if c < 0 {
			// A writer is pending or active; give up immediately.
			if race.Enabled {
				race.Enable()
			}
			return false
		}
		if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(&rw.readerSem))
			}
			return true
		}
	}
}

// RUnlock undoes a single RLock call;
// it does not unlock other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
	if race.Enabled {
		_ = rw.w.state
		race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
		race.Disable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		// Outlined slow-path to allow the fast-path to be inlined.
		rw.rUnlockSlow(r)
	}
	if race.Enabled {
		race.Enable()
	}
}

func (rw *RWMutex) rUnlockSlow(r int32) {
	if r+1 == 0 || r+1 == -rwmutexMaxReaders {
		race.Enable()
		throw("sync: RUnlock of unlocked RWMutex")
	}
	// A writer is pending.
	if atomic.AddInt32(&rw.readerWait, -1) == 0 {
		// The last reader unblocks the writer.
		runtime_Semrelease(&rw.writerSem, false, 1)
	}
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
func (rw *RWMutex) Lock() {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime_SemacquireMutex(&rw.writerSem, false, 0)
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
		race.Acquire(unsafe.Pointer(&rw.writerSem))
	}
}
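
// Caution sketch (added illustration, not part of the original file): because
// Lock makes readerCount negative, a goroutine that already holds a read lock
// and tries to take another one can deadlock if a writer arrives in between.
//
//	var mu sync.RWMutex
//	mu.RLock()
//	go func() { mu.Lock(); defer mu.Unlock() }() // writer starts waiting
//	mu.RLock() // may block forever: new readers wait behind the pending
//	           // writer, and the writer waits for the first read lock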

// TryLock tries to lock rw for writing and reports whether it succeeded.
//
// Note that while correct uses of TryLock do exist, they are rare,
// and use of TryLock is often a sign of a deeper problem
// in a particular use of mutexes.
func (rw *RWMutex) TryLock() bool {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	if !rw.w.TryLock() {
		if race.Enabled {
			race.Enable()
		}
		return false
	}
	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
		// Readers are active; back out of the writer mutex.
		rw.w.Unlock()
		if race.Enabled {
			race.Enable()
		}
		return false
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
		race.Acquire(unsafe.Pointer(&rw.writerSem))
	}
	return true
}
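
// Usage sketch (added illustration, not part of the original file): a
// non-blocking attempt to take the write lock, falling back to other work
// when readers or another writer currently hold the lock. Here mu is a
// sync.RWMutex and flush is a hypothetical function it protects.
//
//	if mu.TryLock() {
//		defer mu.Unlock()
//		flush()
//	} else {
//		// Lock is busy; skip this round rather than blocking.
//	}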

// Unlock unlocks rw for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) a RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
	if race.Enabled {
		_ = rw.w.state
		race.Release(unsafe.Pointer(&rw.readerSem))
		race.Disable()
	}

	// Announce to readers there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		race.Enable()
		throw("sync: Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
	for i := 0; i < int(r); i++ {
		runtime_Semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
	if race.Enabled {
		race.Enable()
	}
}

// RLocker returns a Locker interface that implements
// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
func (rw *RWMutex) RLocker() Locker {
	return (*rlocker)(rw)
}

type rlocker RWMutex

func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
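
// Usage sketch (added illustration, not part of the original file): RLocker
// adapts the read side of an RWMutex to the Locker interface, for example to
// build a sync.Cond whose waiters only need read access. ready is a
// hypothetical predicate protected by mu.
//
//	var mu sync.RWMutex
//	cond := sync.NewCond(mu.RLocker())
//
//	cond.L.Lock()   // equivalent to mu.RLock()
//	for !ready() {
//		cond.Wait()
//	}
//	cond.L.Unlock() // equivalent to mu.RUnlock()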