src/runtime/race_ppc64le.s
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
#include "asm_ppc64x.h"

// The following functions allow calling the clang-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and scheduler.
// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.

// A brief recap of the ppc64le calling convention.
// Arguments are passed in R3, R4, R5 ...
// SP must be 16-byte aligned.

// Note that for ppc64x, LLVM follows the standard ABI and
// expects arguments in registers, so these functions move
// the arguments from storage to the registers expected
// by the ABI.

// When calling from Go to Clang tsan code:
// R3 is the 1st argument and is usually the ThreadState*
// R4-? are the 2nd, 3rd, 4th, etc. arguments
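//
// For example, a minimal sketch of how the wrappers below map a tsan entry
// point's C signature onto these registers (this is what raceread, further
// down in this file, ends up doing):
//
//	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
//	// R3 = thr (g's racectx), R4 = addr, R5 = pc of the instrumented caller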

// When calling racecalladdr:
// R8 is the call target address

// The race ctx is passed in R3 and loaded in
// racecalladdr.
//
// The sequence used to get the race ctx:
//	MOVD	runtime·tls_g(SB), R10	// Address of TLS variable
//	MOVD	0(R10), g		// g = R30
//	MOVD	g_racectx(g), R3	// racectx == ThreadState

// func runtime·RaceRead(addr uintptr)
// Called from instrumented Go code
TEXT	runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
#ifndef GOEXPERIMENT_regabiargs
	MOVD	addr+0(FP), R4
#else
	MOVD	R3, R4
#endif
	MOVD	LR, R5 // caller has set LR via BL inst
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	BR	runtime·raceread(SB)

// void runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceWrite(addr uintptr)
// Called from instrumented Go code
TEXT	runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
#ifndef GOEXPERIMENT_regabiargs
	MOVD	addr+0(FP), R4
#else
	MOVD	R3, R4
#endif
	MOVD	LR, R5 // caller has set LR via BL inst
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	JMP	runtime·racewrite(SB)

// void runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceReadRange(addr, size uintptr)
// Called from instrumented Go code.
TEXT	runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
#ifndef GOEXPERIMENT_regabiargs
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
#else
	MOVD	R4, R5
	MOVD	R3, R4
#endif
	MOVD	LR, R6
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)

// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	ADD	$4, R6 // tsan wants return addr
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-16
	BR	runtime·racereadrange(SB)

// func runtime·RaceWriteRange(addr, size uintptr)
// Called from instrumented Go code.
TEXT	runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
#ifndef GOEXPERIMENT_regabiargs
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
#else
	MOVD	R4, R5
	MOVD	R3, R4
#endif
	MOVD	LR, R6
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	BR	runtime·racewriterange(SB)

// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
// Called from instrumented Go code
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	ADD	$4, R6 // tsan wants return addr
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

// Call a __tsan function from Go code.
// R8 = tsan function address
// R3 = *ThreadState a.k.a. g_racectx from g
// R4 = addr passed to __tsan function
//
// If addr (R4) is outside the race-instrumented address ranges, do nothing.
// Otherwise, set up the goroutine context and invoke racecall. Other arguments are already set.
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3 // goroutine context
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
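	// Roughly, the compares and branches below amount to this C-style sketch
	// (racearenastart etc. are the runtime globals loaded into R9):
	//
	//	if (racearenastart <= addr && addr < racearenaend) goto call;
	//	if (racedatastart <= addr && addr <= racedataend) goto call;
	//	return;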
	MOVD	runtime·racearenastart(SB), R9
	CMP	R4, R9
	BLT	data
	MOVD	runtime·racearenaend(SB), R9
	CMP	R4, R9
	BLT	call
data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R4, R9
	BLT	ret
	MOVD	runtime·racedataend(SB), R9
	CMP	R4, R9
	BGT	ret
call:
	// Careful!! racecall will save LR on its
	// stack, which is OK as long as racecalladdr
	// doesn't change in a way that generates a stack frame.
	// racecall should return to the caller of
	// racecalladdr.
	BR	racecall<>(SB)
ret:
	RET

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented Go code.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R8
	BR	racefuncenter<>(SB)

// Common code for racefuncenter
// R8 = caller's return address
TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3 // goroutine racectx aka *ThreadState
	MOVD	R8, R4 // caller pc set by caller in R8
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R8
	BR	racecall<>(SB)
	RET

// func runtime·racefuncexit()
// Called from Go instrumented code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3 // goroutine racectx aka *ThreadState
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R8
	BR	racecall<>(SB)

// Atomic operations for sync/atomic package.
// Some use the __tsan versions instead
// R6 = addr of arguments passed to this function
// R3, R4, R5 set in racecallatomic
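//
// For example, a sketch of what sync∕atomic·LoadInt32 below reduces to
// (register assignment as set up in racecallatomic):
//
//	__tsan_go_atomic32_load(racectx /*R3*/, cpc /*R4*/, pc /*R5*/, args /*R6*/)
//
// where args points at the wrapper's incoming argument block (addr, ret).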

// Load atomic in tsan
TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_load(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_load(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR	sync∕atomic·LoadInt32(SB)

TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

// Store atomic in tsan
TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_store(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_store(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR	sync∕atomic·StoreInt32(SB)

TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·StoreInt64(SB)

TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·StoreInt64(SB)

// Swap in tsan
TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR	sync∕atomic·SwapInt32(SB)

TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·SwapInt64(SB)

TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·SwapInt64(SB)

// Add atomic in tsan
TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R8
	ADD	$64, R1, R6 // addr of caller's 1st arg
	BL	racecallatomic<>(SB)
	// The tsan fetch_add result is not as expected by Go,
	// so the 'add' must be added to the result.
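	// In effect (sketch, assuming tsan's fetch_add stores the old value in ret):
	//	ret = ret + add // turn the returned old value into Go's "new value" result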
	MOVW	add+8(FP), R3 // the 'add' operand
	MOVW	ret+16(FP), R4 // value stored by tsan's fetch_add
	ADD	R3, R4, R3
	MOVW	R3, ret+16(FP)
	RET

TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R8
	ADD	$64, R1, R6 // addr of caller's 1st arg
	BL	racecallatomic<>(SB)
	// The tsan fetch_add result is not as expected by Go,
	// so the 'add' must be added to the result.
	MOVD	add+8(FP), R3
	MOVD	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVD	R3, ret+16(FP)
	RET

TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR	sync∕atomic·AddInt32(SB)

TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·AddInt64(SB)

TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·AddInt64(SB)

// CompareAndSwap in tsan
TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	// void __tsan_go_atomic32_compare_exchange(
	//	ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	// void __tsan_go_atomic64_compare_exchange(
	//	ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt32(SB)

TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt64(SB)

TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt64(SB)

// Common function used to call tsan's atomic functions
// R3 = *ThreadState (set below)
// R4, R5 = caller pc and pc of the tsan function being called (set below)
// R6 = addr of incoming arg list
// R8 contains addr of target function.
TEXT	racecallatomic<>(SB), NOSPLIT, $0-0
	// Trigger SIGSEGV early if address passed to atomic function is bad.
	MOVD	(R6), R7 // 1st arg is addr
	MOVD	(R7), R9 // segv here if addr is bad
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD	runtime·racearenastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_data
	MOVD	runtime·racearenaend(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ok
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ignore
	MOVD	runtime·racedataend(SB), R9
	CMP	R7, R9
	BGE	racecallatomic_ignore
racecallatomic_ok:
	// Addr is within the good range, call the atomic function.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3 // goroutine racectx aka *ThreadState
	MOVD	R8, R5 // pc is the function called
	MOVD	(R1), R4 // caller pc from stack
	BL	racecall<>(SB) // BL needed to maintain stack consistency
	RET
racecallatomic_ignore:
	// Addr is outside the good range.
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
	// An attempt to synchronize on the address would cause a crash.
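	// Sketch of the sequence performed below (pseudocode):
	//	__tsan_go_ignore_sync_begin(racectx)
	//	tsan_atomic_fn(racectx, ...) // the original function, saved in R15
	//	__tsan_go_ignore_sync_end(racectx)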
	MOVD	R8, R15 // save the original function
	MOVD	R6, R17 // save the original arg list addr
	MOVD	$__tsan_go_ignore_sync_begin(SB), R8 // func addr to call
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3 // goroutine context
	BL	racecall<>(SB)
	MOVD	R15, R8 // restore the original function
	MOVD	R17, R6 // restore arg list addr
	// Call the atomic function.
	// racecall will call LLVM race code which might clobber R30 (g)
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g

	MOVD	g_racectx(g), R3
	MOVD	R8, R4 // pc of the tsan function being called
	MOVD	(R1), R5 // caller pc from latest LR
	BL	racecall<>(SB)
	// Call __tsan_go_ignore_sync_end.
	MOVD	$__tsan_go_ignore_sync_end(SB), R8
	MOVD	g_racectx(g), R3 // goroutine context; g should still be valid here
	BL	racecall<>(SB)
	RET

// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
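//
// For reference, the Go-side declaration (in runtime/race.go) looks roughly like:
//
//	//go:noescape
//	func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)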
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R8
	MOVD	arg0+8(FP), R3
	MOVD	arg1+16(FP), R4
	MOVD	arg2+24(FP), R5
	MOVD	arg3+32(FP), R6
	JMP	racecall<>(SB)

// Switches to the g0 stack if necessary and calls the C function in R8.
// Arguments have already been loaded into registers for the call from Go to C.
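//
// Roughly (pseudocode sketch of the body below):
//
//	save LR
//	if g != g.m.g0 { R1 = g.m.g0.sched.sp } // switch to the g0 stack
//	fn(R3, R4, R5, R6)                      // fn is the address in R8
//	restore R1, g and LR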
TEXT	racecall<>(SB), NOSPLIT, $0-0
	// Set the LR slot for the ppc64 ABI
	MOVD	LR, R10
	MOVD	R10, 0(R1) // Go expectation
	MOVD	R10, 16(R1) // C ABI
	// Get info from the current goroutine
	MOVD	runtime·tls_g(SB), R10 // g offset in TLS
	MOVD	0(R10), g
	MOVD	g_m(g), R7 // m for g
	MOVD	R1, R16 // callee-saved, preserved across C call
	MOVD	m_g0(R7), R10 // g0 for m
	CMP	R10, g // same g0?
	BEQ	call // already on g0
	MOVD	(g_sched+gobuf_sp)(R10), R1 // switch R1
call:
	MOVD	R8, CTR // R8 = address of function to call
	MOVD	R8, R12 // expected by PPC64 ABI
	BL	(CTR)
	XOR	R0, R0 // clear R0 on return from Clang
	MOVD	R16, R1 // restore R1; R16 nonvol in Clang
	MOVD	runtime·tls_g(SB), R10 // find correct g
	MOVD	0(R10), g
	MOVD	16(R1), R10 // LR was saved away, restore for return
	MOVD	R10, LR
	RET

// C->Go callback thunk that allows calling runtime·racesymbolize from C code.
// A direct Go->C race call only switches SP, so finish the g->g0 switch by setting the correct g.
// The overall effect of the Go->C->Go call chain is similar to that of mcall.
// RARG0 contains command code. RARG1 contains command-specific context.
// See racecallback for command codes.
TEXT	runtime·racecallbackthunk(SB), NOSPLIT, $-8
	// Handle command raceGetProcCmd (0) here.
	// First, code below assumes that we are on curg, while raceGetProcCmd
	// can be executed on g0. Second, it is called frequently, so it will
	// benefit from this fast path.
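	// Pseudocode sketch of this fast path (cmd arrives in R3, the output
	// pointer in R4):
	//
	//	if cmd == raceGetProcCmd {
	//		*out = current P's raceprocctx
	//		return
	//	}
	//	// otherwise fall through to "rest" below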
	XOR	R0, R0 // clear R0 since we came from C code
	CMP	R3, $0
	BNE	rest
	// g0 TODO: Don't modify g here since R30 is nonvolatile
	MOVD	g, R9
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_m(g), R3
	MOVD	m_p(R3), R3
	MOVD	p_raceprocctx(R3), R3
	MOVD	R3, (R4)
	MOVD	R9, g // restore R30 (g)
	RET

// This is all similar to what cgo does
// Save registers according to the ppc64 ABI
rest:
	MOVD	LR, R10 // save link register
	MOVD	R10, 16(R1)
	MOVW	CR, R10
	MOVW	R10, 8(R1)
	MOVDU	R1, -336(R1) // Allocate frame needed for outargs and register save area

	MOVD	R14, 328(R1)
	MOVD	R15, 48(R1)
	MOVD	R16, 56(R1)
	MOVD	R17, 64(R1)
	MOVD	R18, 72(R1)
	MOVD	R19, 80(R1)
	MOVD	R20, 88(R1)
	MOVD	R21, 96(R1)
	MOVD	R22, 104(R1)
	MOVD	R23, 112(R1)
	MOVD	R24, 120(R1)
	MOVD	R25, 128(R1)
	MOVD	R26, 136(R1)
	MOVD	R27, 144(R1)
	MOVD	R28, 152(R1)
	MOVD	R29, 160(R1)
	MOVD	g, 168(R1) // R30
	MOVD	R31, 176(R1)
	FMOVD	F14, 184(R1)
	FMOVD	F15, 192(R1)
	FMOVD	F16, 200(R1)
	FMOVD	F17, 208(R1)
	FMOVD	F18, 216(R1)
	FMOVD	F19, 224(R1)
	FMOVD	F20, 232(R1)
	FMOVD	F21, 240(R1)
	FMOVD	F22, 248(R1)
	FMOVD	F23, 256(R1)
	FMOVD	F24, 264(R1)
	FMOVD	F25, 272(R1)
	FMOVD	F26, 280(R1)
	FMOVD	F27, 288(R1)
	FMOVD	F28, 296(R1)
	FMOVD	F29, 304(R1)
	FMOVD	F30, 312(R1)
	FMOVD	F31, 320(R1)

	MOVD	R3, FIXED_FRAME+0(R1)
	MOVD	R4, FIXED_FRAME+8(R1)

	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g

	MOVD	g_m(g), R7
	MOVD	m_g0(R7), R8
	CMP	g, R8
	BEQ	noswitch

	MOVD	R8, g // set g = m->g0

	BL	runtime·racecallback(SB)

	// All registers are clobbered after Go code, reload.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g

	MOVD	g_m(g), R7
	MOVD	m_curg(R7), g // restore g = m->curg

ret:
	MOVD	328(R1), R14
	MOVD	48(R1), R15
	MOVD	56(R1), R16
	MOVD	64(R1), R17
	MOVD	72(R1), R18
	MOVD	80(R1), R19
	MOVD	88(R1), R20
	MOVD	96(R1), R21
	MOVD	104(R1), R22
	MOVD	112(R1), R23
	MOVD	120(R1), R24
	MOVD	128(R1), R25
	MOVD	136(R1), R26
	MOVD	144(R1), R27
	MOVD	152(R1), R28
	MOVD	160(R1), R29
	MOVD	168(R1), g // R30
	MOVD	176(R1), R31
	FMOVD	184(R1), F14
	FMOVD	192(R1), F15
	FMOVD	200(R1), F16
	FMOVD	208(R1), F17
	FMOVD	216(R1), F18
	FMOVD	224(R1), F19
	FMOVD	232(R1), F20
	FMOVD	240(R1), F21
	FMOVD	248(R1), F22
	FMOVD	256(R1), F23
	FMOVD	264(R1), F24
	FMOVD	272(R1), F25
	FMOVD	280(R1), F26
	FMOVD	288(R1), F27
	FMOVD	296(R1), F28
	FMOVD	304(R1), F29
	FMOVD	312(R1), F30
	FMOVD	320(R1), F31

	ADD	$336, R1
	MOVD	8(R1), R10
	MOVFL	R10, $0xff // Restore CR
	MOVD	16(R1), R10 // restore LR saved at entry
	MOVD	R10, LR
	RET

noswitch:
	BL	runtime·racecallback(SB)
	JMP	ret

// tls_g, g value for each thread in TLS
GLOBL	runtime·tls_g+0(SB), TLSBSS+DUPOK, $8
