1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include "textflag.h"
6
7 // Linux/ARM atomic operations.
8
9 // Because there is so much variation in ARM devices,
10 // the Linux kernel provides an appropriate compare-and-swap
11 // implementation at address 0xffff0fc0. Caller sets:
12 // R0 = old value
13 // R1 = new value
14 // R2 = addr
15 // LR = return address
16 // The function returns with CS true if the swap happened.
17 // http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
18 // On older kernels (before 2.6.24) the function can incorrectly
19 // report a conflict, so we have to double-check the compare ourselves
20 // and retry if necessary.
21 //
22 // https://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
23 //
// cas<> tail-jumps into the kernel-provided __kuser_cmpxchg helper by
// writing its fixed address into the hardware PC. Per the contract above:
// R0 = old, R1 = new, R2 = addr, LR = return address; the helper returns
// (via LR) with the C flag set iff the swap happened. All other register
// contents may be clobbered by the kernel helper.
TEXT cas<>(SB),NOSPLIT,$0
	MOVW	$0xffff0fc0, R15 // R15 is hardware PC.
26
// Cas: compare-and-swap dispatcher.
// On ARMv7 and later (runtime·goarm >= 7) tail-call the native
// LDREX/STREX implementation ·armcas; on older cores fall back to the
// kernel __kuser_cmpxchg helper via kernelcas<>.
// Arguments are left untouched on the frame for the tail-called routine.
TEXT ·Cas(SB),NOSPLIT|NOFRAME,$0
	MOVB	runtime·goarm(SB), R11
	CMP	$7, R11
	BLT	2(PC)            // goarm < 7: skip over the armcas jump
	JMP	·armcas(SB)      // goarm >= 7: native CAS
	JMP	kernelcas<>(SB)  // goarm < 7: kernel-helper CAS
33
// kernelcas<> implements Cas(ptr, old, new) using the kernel
// __kuser_cmpxchg helper (via cas<>). Returns 1 in ret+12(FP) if the
// swap happened, 0 otherwise. Because pre-2.6.24 kernels can falsely
// report a conflict (see header comment), a reported failure is
// double-checked against the current memory value and retried when the
// value still equals old.
TEXT kernelcas<>(SB),NOSPLIT,$0
	MOVW	ptr+0(FP), R2
	// trigger potential paging fault here,
	// because we don't know how to traceback through __kuser_cmpxchg
	MOVW	(R2), R0        // dummy load; R0 is immediately overwritten below
	MOVW	old+4(FP), R0
loop:
	MOVW	new+8(FP), R1   // reload each iteration: cas<> may clobber R1
	BL	cas<>(SB)
	BCC	check           // C clear: helper claims the compare failed
	MOVW	$1, R0          // swap happened
	MOVB	R0, ret+12(FP)
	RET
check:
	// Kernel lies; double-check.
	MOVW	ptr+0(FP), R2   // reload: helper may have clobbered R2
	MOVW	old+4(FP), R0
	MOVW	0(R2), R3       // current value at ptr
	CMP	R0, R3
	BEQ	loop            // still equals old: failure was spurious, retry
	MOVW	$0, R0          // genuine mismatch: report failure
	MOVB	R0, ret+12(FP)
	RET
57
58 // As for cas, memory barriers are complicated on ARM, but the kernel
59 // provides a user helper. ARMv5 does not support SMP and has no
60 // memory barrier instruction at all. ARMv6 added SMP support and has
61 // a memory barrier, but it requires writing to a coprocessor
62 // register. ARMv7 introduced the DMB instruction, but it's expensive
63 // even on single-core devices. The kernel helper takes care of all of
64 // this for us.
65
66 // Use kernel helper version of memory_barrier, when compiled with GOARM < 7.
// memory_barrier<> tail-jumps into the kernel __kuser_memory_barrier
// helper at its fixed address by writing it into the hardware PC.
// The helper issues whatever barrier the running CPU requires and
// returns via LR. Used only when GOARM < 7 (no DMB instruction).
TEXT memory_barrier<>(SB),NOSPLIT|NOFRAME,$0
	MOVW	$0xffff0fa0, R15 // R15 is hardware PC.
69
// Load: atomic 32-bit load with acquire semantics.
// Loads the word, then issues a memory barrier so later accesses cannot
// be reordered before the load: DMB on ARMv7+, kernel helper otherwise.
TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	addr+0(FP), R0
	MOVW	(R0), R1        // R1 = *addr

	MOVB	runtime·goarm(SB), R11
	CMP	$7, R11
	BGE	native_barrier  // ARMv7+: use DMB directly
	BL	memory_barrier<>(SB)
	B	end
native_barrier:
	DMB	MB_ISH
end:
	MOVW	R1, ret+4(FP)
	RET
84
// Store: atomic 32-bit store with sequentially-consistent-style
// barriers: one before the store (so earlier accesses complete first)
// and one after (so the store is visible before later accesses).
// Barrier choice: DMB on ARMv7+, kernel helper otherwise.
// R8 holds goarm across the store so the second check needn't reload it.
TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	addr+0(FP), R1
	MOVW	v+4(FP), R2

	MOVB	runtime·goarm(SB), R8
	CMP	$7, R8
	BGE	native_barrier
	BL	memory_barrier<>(SB)
	B	store
native_barrier:
	DMB	MB_ISH

store:
	MOVW	R2, (R1)        // *addr = v

	CMP	$7, R8          // flags were clobbered above; re-test goarm
	BGE	native_barrier2
	BL	memory_barrier<>(SB)
	RET
native_barrier2:
	DMB	MB_ISH
	RET
107
// Load8: atomic 8-bit load with acquire semantics.
// Same structure as Load: byte load followed by a barrier
// (DMB on ARMv7+, kernel helper otherwise).
TEXT ·Load8(SB),NOSPLIT,$0-5
	MOVW	addr+0(FP), R0
	MOVB	(R0), R1        // R1 = *addr (byte)

	MOVB	runtime·goarm(SB), R11
	CMP	$7, R11
	BGE	native_barrier  // ARMv7+: use DMB directly
	BL	memory_barrier<>(SB)
	B	end
native_barrier:
	DMB	MB_ISH
end:
	MOVB	R1, ret+4(FP)
	RET
122
// Store8: atomic 8-bit store, bracketed by barriers exactly like Store:
// one before the store and one after, with DMB on ARMv7+ and the kernel
// helper otherwise. R8 caches goarm across the store.
TEXT ·Store8(SB),NOSPLIT,$0-5
	MOVW	addr+0(FP), R1
	MOVB	v+4(FP), R2

	MOVB	runtime·goarm(SB), R8
	CMP	$7, R8
	BGE	native_barrier
	BL	memory_barrier<>(SB)
	B	store
native_barrier:
	DMB	MB_ISH

store:
	MOVB	R2, (R1)        // *addr = v (byte)

	CMP	$7, R8          // flags were clobbered above; re-test goarm
	BGE	native_barrier2
	BL	memory_barrier<>(SB)
	RET
native_barrier2:
	DMB	MB_ISH
	RET
145
View as plain text