// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build mips || mipsle

#include "textflag.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
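//
// SC writes 1 to its source register on success and 0 on failure, so
// BEQ on that register retries the LL/SC sequence until it commits.
// The SYNC barriers before and after make this a full-barrier CAS.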
TEXT ·Cas(SB),NOSPLIT,$0-13
	MOVW	ptr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R5
	SYNC
try_cas:
	MOVW	R5, R3
	LL	(R1), R4	// R4 = *R1
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, try_cas	// SC failed, retry
	SYNC
	MOVB	R3, ret+12(FP)
	RET
cas_fail:
	SYNC	// barrier on the failure path too, so a failed CAS still orders memory
	MOVB	R0, ret+12(FP)
	RET

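// void Store(uint32 volatile *ptr, uint32 val)
// Atomically:
//	*ptr = val;
// The SYNCs before and after give the store full-barrier semantics.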
TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

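// void Store8(byte volatile *ptr, byte val)
// Atomically:
//	*ptr = val;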
TEXT ·Store8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVB	val+4(FP), R2
	SYNC
	MOVB	R2, 0(R1)
	SYNC
	RET

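// uint32 Load(uint32 volatile *ptr)
// Atomically:
//	return *ptr;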
TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	SYNC
	MOVW	0(R1), R1
	SYNC
	MOVW	R1, ret+4(FP)
	RET

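// uint8 Load8(byte volatile *ptr)
// Atomically:
//	return *ptr;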
TEXT ·Load8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	SYNC
	MOVB	0(R1), R1
	SYNC
	MOVB	R1, ret+4(FP)
	RET

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
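//
// The sum is copied to R1 before the SC because SC overwrites its
// source register with the success flag, and the updated value must
// survive to be returned.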
TEXT ·Xadd(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	delta+4(FP), R3
	SYNC
try_xadd:
	LL	(R2), R1	// R1 = *R2
	ADDU	R1, R3, R4	// R4 = R1 + R3 (old value + delta)
	MOVW	R4, R1	// save the new value for the return
	SC	R4, (R2)	// *R2 = R4
	BEQ	R4, try_xadd	// SC failed, retry
	SYNC
	MOVW	R1, ret+8(FP)
	RET

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
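//
// new is re-copied from R5 to R3 on every iteration because a failed
// SC clobbers R3 with the zero success flag.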
TEXT ·Xchg(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	new+4(FP), R5
	SYNC
try_xchg:
	MOVW	R5, R3
	LL	(R2), R1	// R1 = *R2
	SC	R3, (R2)	// *R2 = R3
	BEQ	R3, try_xchg	// SC failed, retry
	SYNC
	MOVW	R1, ret+8(FP)
	RET

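// The functions below are thin aliases: on 32-bit mips, int32, uint32,
// uintptr, and unsafe.Pointer values all share the 32-bit
// implementations above, while the 64-bit variants (Cas64, Xadd64,
// Xchg64, Load64, Store64) are implemented in the Go source of this
// package (atomic_mipsx.go) on top of the spinLock/spinUnlock helpers
// at the end of this file.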
TEXT ·Casint32(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Casint64(SB),NOSPLIT,$0-21
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadp(SB),NOSPLIT,$-0-8
	JMP	·Load(SB)

TEXT ·Storeint32(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Storeint64(SB),NOSPLIT,$0-12
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Loadint32(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-12
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-20
	JMP	·Xadd64(SB)

TEXT ·Casp1(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Xchgint32(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB),NOSPLIT,$0-20
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreRel(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

// void Or8(byte volatile*, byte);
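//
// There is no byte-sized LL/SC, so the byte is updated through the
// aligned 32-bit word that contains it:
//	word = ptr & ~3;	// aligned word address
//	shift = (ptr & 3) * 8;	// on big-endian mips: ((ptr ^ 3) & 3) * 8
//	*word |= val << shift;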
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ptr & 3
	SLL	$3, R4	// R4 = (ptr & 3) * 8
	SLL	R4, R2, R2	// Shift val for aligned ptr.  R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_or8	// SC failed, retry
	SYNC
	RET

// void And8(byte volatile*, byte);
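//
// Same aligned-word technique as Or8, but the mask must leave the
// other three bytes unchanged:
//	*word &= (val << shift) | ~(0xFF << shift);
// i.e. only the target byte is ANDed with val.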
TEXT ·And8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ptr & 3
	SLL	$3, R4	// R4 = (ptr & 3) * 8
	MOVW	$0xFF, R5
	SLL	R4, R2	// R2 = val << R4
	SLL	R4, R5	// R5 = 0xFF << R4
	NOR	R0, R5	// R5 = ^(0xFF << R4)
	OR	R5, R2	// R2 = val << R4 | ^(0xFF << R4)
	SYNC
try_and8:
	LL	(R3), R4	// R4 = *R3
	AND	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_and8	// SC failed, retry
	SYNC
	RET

// func Or(ptr *uint32, val uint32)
// Atomically:
//	*ptr |= val;
TEXT ·Or(SB), NOSPLIT, $0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3	// R3 = *R1
	OR	R2, R3
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, -4(PC)	// SC failed, branch back and retry
	SYNC
	RET

// func And(ptr *uint32, val uint32)
// Atomically:
//	*ptr &= val;
TEXT ·And(SB), NOSPLIT, $0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3	// R3 = *R1
	AND	R2, R3
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, -4(PC)	// SC failed, branch back and retry
	SYNC
	RET

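// void spinLock(uint32 *state)
// Busy-waits until it atomically changes *state from 0 (unlocked) to
// 1 (locked). Used by the Go code in this package to serialize the
// 64-bit operations, which mips32 cannot express with a single LL/SC.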
TEXT ·spinLock(SB),NOSPLIT,$0-4
	MOVW	state+0(FP), R1
	MOVW	$1, R2
	SYNC
try_lock:
	MOVW	R2, R3
check_again:
	LL	(R1), R4
	BNE	R4, check_again	// spin while the lock is held (*state != 0)
	SC	R3, (R1)	// *R1 = 1
	BEQ	R3, try_lock	// SC failed, retry
	SYNC
	RET

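// void spinUnlock(uint32 *state)
// Releases the lock by storing 0 to *state, with barriers on both
// sides so the critical section cannot leak past the unlock.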
TEXT ·spinUnlock(SB),NOSPLIT,$0-4
	MOVW	state+0(FP), R1
	SYNC
	MOVW	R0, (R1)	// *state = 0
	SYNC
	RET