src/runtime/lock_futex.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || linux
// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
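// To make that contract concrete, a user-space analogue of these two hooks on
// Linux might wrap the raw futex(2) syscall via golang.org/x/sys/unix (a
// sketch under that assumption; the runtime's real implementations live in
// OS-specific files such as os_linux.go and do not use x/sys):
//
//	// futexsleep blocks while *addr == val, for at most ns nanoseconds
//	// (ns < 0 means no timeout); spurious wakeups are tolerated.
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		var ts *unix.Timespec
//		if ns >= 0 {
//			t := unix.NsecToTimespec(ns)
//			ts = &t
//		}
//		unix.Syscall6(unix.SYS_FUTEX, uintptr(unsafe.Pointer(addr)),
//			uintptr(unix.FUTEX_WAIT|unix.FUTEX_PRIVATE_FLAG),
//			uintptr(val), uintptr(unsafe.Pointer(ts)), 0, 0)
//	}
//
//	// futexwakeup wakes at most cnt threads sleeping on addr.
//	func futexwakeup(addr *uint32, cnt uint32) {
//		unix.Syscall6(unix.SYS_FUTEX, uintptr(unsafe.Pointer(addr)),
//			uintptr(unix.FUTEX_WAKE|unix.FUTEX_PRIVATE_FLAG),
//			uintptr(cnt), 0, 0, 0)
//	}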
const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
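// To make the wait/wakeup dance concrete, consider two Ms (an illustrative
// trace, not from the original source). M1's speculative Xchg in lock2
// returns mutex_unlocked, so M1 owns the lock. M2's Xchg then returns
// mutex_locked; its spins fail, and its Xchg to mutex_sleeping also returns
// mutex_locked, so M2 futexsleeps. When M1 runs unlock2 (below), the Xchg to
// mutex_unlocked returns mutex_sleeping, so M1 calls futexwakeup to wake one
// waiter. M2 then retries with wait = mutex_sleeping, ensuring any Ms still
// asleep keep getting wakeup calls on later unlocks.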
func unlock(l *mutex) {
	unlockWithRank(l)
}

func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
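// The same three-state protocol can be sketched as portable Go using
// sync/atomic plus the hypothetical futexsleep/futexwakeup wrappers sketched
// earlier (illustrative only; the runtime cannot and does not use sync/atomic
// here):
//
//	type futexMutex struct{ key uint32 }
//
//	func (m *futexMutex) Lock() {
//		// Speculative grab, as in lock2.
//		v := atomic.SwapUint32(&m.key, mutex_locked)
//		for v != mutex_unlocked {
//			// Contended: advertise a (possible) sleeper, then sleep until
//			// an Unlock stores mutex_unlocked and wakes us.
//			v = atomic.SwapUint32(&m.key, mutex_sleeping)
//			if v == mutex_unlocked {
//				return // grabbed it; key stays mutex_sleeping, so Unlock over-wakes harmlessly
//			}
//			futexsleep(&m.key, mutex_sleeping, -1)
//		}
//	}
//
//	func (m *futexMutex) Unlock() {
//		// Xchg rather than a plain store, so we learn whether anyone was sleeping.
//		if atomic.SwapUint32(&m.key, mutex_unlocked) == mutex_sleeping {
//			futexwakeup(&m.key, 1)
//		}
//	}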
// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
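// A schematic of the one-time notification lifecycle (illustrative, not from
// this file): exactly one notewakeup may follow each noteclear, and notesleep
// returns once the key becomes nonzero.
//
//	var n note
//	noteclear(&n) // reset: n.key = 0
//	// M1, on g0:
//	notesleep(&n) // futexsleeps until n.key != 0
//	// M2, concurrently:
//	notewakeup(&n) // sets n.key = 1 and wakes M1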
// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}
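// Worked example of the deadline arithmetic above (illustrative numbers):
// for ns = 50e6 (50ms), deadline = nanotime() + 50ms. If a spurious wakeup
// occurs 20ms in while n.key is still 0, now < deadline, so the loop sleeps
// again with ns = deadline - now = 30ms instead of restarting the full 50ms.
// With cgo_yield set, each individual futexsleep is also capped at 10e6 ns
// (10ms) so that libc interceptors are polled at least that often.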
217
218 func notetsleep(n *note, ns int64) bool {
219 gp := getg()
220 if gp != gp.m.g0 && gp.m.preemptoff != "" {
221 throw("notetsleep not on g0")
222 }
223
224 return notetsleep_internal(n, ns)
225 }
226
227
228
229 func notetsleepg(n *note, ns int64) bool {
230 gp := getg()
231 if gp == gp.m.g0 {
232 throw("notetsleepg on g0")
233 }
234
235 entersyscallblock()
236 ok := notetsleep_internal(n, ns)
237 exitsyscall()
238 return ok
239 }
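// Because notetsleepg runs on a user g, it brackets the wait with
// entersyscallblock/exitsyscall so the M hands off its P while blocked. A
// schematic caller (illustrative, not from this file):
//
//	var n note
//	noteclear(&n)
//	if notetsleepg(&n, 100e6) { // wait up to 100ms
//		// woken by notewakeup before the deadline
//	}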
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}