Source file src/runtime/cgocall.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Cgo call and callback support.
//
// To call into the C function f from Go, the cgo-generated code calls
// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
// gcc-compiled function written by cgo.
//
// runtime.cgocall (below) calls entersyscall so as not to block
// other goroutines or the garbage collector, and then calls
// runtime.asmcgocall(_cgo_Cfunc_f, frame).
//
// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
// (assumed to be an operating system-allocated stack, so safe to run
// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
//
// _cgo_Cfunc_f invokes the actual C function f with arguments
// taken from the frame structure, records the results in the frame,
// and returns to runtime.asmcgocall.
//
// After it regains control, runtime.asmcgocall switches back to the
// original g (m->curg)'s stack and returns to runtime.cgocall.
//
// After it regains control, runtime.cgocall calls exitsyscall, which blocks
// until this m can run Go code without violating the $GOMAXPROCS limit,
// and then unlocks g from m.
//
// The above description skipped over the possibility of the gcc-compiled
// function f calling back into Go. If that happens, we continue down
// the rabbit hole during the execution of f.
//
// To make it possible for gcc-compiled C code to call a Go function p.GoF,
// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
// know about packages). The gcc-compiled C function f calls GoF.
//
// GoF initializes "frame", a structure containing all of its
// arguments and slots for p.GoF's results. It calls
// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
//
// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
// the gcc function call ABI to the gc function call ABI. At this
// point we're in the Go runtime, but we're still running on m.g0's
// stack and outside the $GOMAXPROCS limit. crosscall2 calls
// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
// (crosscall2's framesize argument is no longer used, but there's one
// case where SWIG calls crosscall2 directly and expects to pass this
// argument. See _cgo_panic.)
//
// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
// to the original g (m.curg)'s stack, on which it calls
// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
// stack switch, runtime.cgocallback saves the current SP as
// m.g0.sched.sp, so that any use of m.g0's stack during the execution
// of the callback will be done below the existing stack frames.
// Before overwriting m.g0.sched.sp, it pushes the old value on the
// m.g0 stack, so that it can be restored later.
//
// runtime.cgocallbackg (below) is now running on a real goroutine
// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
// allocator or invoke the Go callback function.
// runtime.cgocallbackg first defers a function to unwind
// m.g0.sched.sp, so that if p.GoF panics, m.g0.sched.sp will be
// restored to its old value: the m.g0 stack and the m.curg stack
// will be unwound in lock step.
// Then it calls _cgoexp_GoF(frame).
//
// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
// from frame, calls p.GoF, writes the results back to frame, and
// returns. Now we start unwinding this whole process.
//
// runtime.cgocallbackg pops but does not execute the deferred
// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
// returns to runtime.cgocallback.
//
// After it regains control, runtime.cgocallback switches back to
// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
// m.g0.sched.sp value from the stack, and returns to crosscall2.
//
// crosscall2 restores the callee-save registers for gcc and returns
// to GoF, which unpacks any result values and returns to f.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Addresses collected in a cgo backtrace when crashing.
// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr

// argset matches runtime/cgo/linux_syscall.c:argset_t
type argset struct {
	args   unsafe.Pointer
	retval uintptr
}

// wrapper for syscall package to call cgocall for libc (cgo) calls.
//go:linkname syscall_cgocaller syscall.cgocaller
//go:nosplit
//go:uintptrescapes
func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
	as := argset{args: unsafe.Pointer(&args[0])}
	cgocall(fn, unsafe.Pointer(&as))
	return as.retval
}

var ncgocall uint64 // number of cgo calls in total for dead m

// Call from Go to C.
//
// This must be nosplit because it's used for syscalls on some
// platforms. Syscalls may have untyped arguments on the stack, so
// it's not safe to grow or scan the stack.
//
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
	if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
		throw("cgocall unavailable")
	}

	if fn == nil {
		throw("cgocall nil")
	}

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	mp := getg().m
	mp.ncgocall++
	mp.ncgo++

	// Reset traceback.
	mp.cgoCallers[0] = 0

	// Announce we are entering a system call
	// so that the scheduler knows to create another
	// M to run goroutines while we are in the
	// foreign code.
	//
	// The call to asmcgocall is guaranteed not to
	// grow the stack and does not allocate memory,
	// so it is safe to call while "in a system call", outside
	// the $GOMAXPROCS accounting.
	//
	// fn may call back into Go code, in which case we'll exit the
	// "system call", run the Go code (which may grow the stack),
	// and then re-enter the "system call" reusing the PC and SP
	// saved by entersyscall here.
	entersyscall()

	// Tell asynchronous preemption that we're entering external
	// code. We do this after entersyscall because this may block
	// and cause an async preemption to fail, but at this point a
	// sync preemption will succeed (though this is not a matter
	// of correctness).
	osPreemptExtEnter(mp)

	mp.incgo = true
	errno := asmcgocall(fn, arg)

	// Update accounting before exitsyscall because exitsyscall may
	// reschedule us on to a different M.
	mp.incgo = false
	mp.ncgo--

	osPreemptExtExit(mp)

	exitsyscall()

	// Note that raceacquire must be called only after exitsyscall has
	// wired this M to a P.
	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// From the garbage collector's perspective, time can move
	// backwards in the sequence above. If there's a callback into
	// Go code, GC will see this function at the call to
	// asmcgocall. When the Go call later returns to C, the
	// syscall PC/SP is rolled back and the GC sees this function
	// back at the call to entersyscall. Normally, fn and arg
	// would be live at entersyscall and dead at asmcgocall, so if
	// time moved backwards, GC would see these arguments as dead
	// and then live. Prevent these undead arguments from crashing
	// GC by forcing them to stay live across this time warp.
	KeepAlive(fn)
	KeepAlive(arg)
	KeepAlive(mp)

	return errno
}

// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
//go:nosplit
func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		println("runtime: bad g in cgocallback")
		exit(2)
	}

	// The call from C is on gp.m's g0 stack, so we must ensure
	// that we stay on that M. We have to do this before calling
	// exitsyscall, since it would otherwise be free to move us to
	// a different M. The call to unlockOSThread is in unwindm.
	lockOSThread()

	checkm := gp.m

	// Save the current syscall parameters, so m.syscall can be
	// used again if the callback decides to make a syscall.
	syscall := gp.m.syscall

	// entersyscall saves the caller's SP to allow the GC to trace the Go
	// stack. However, since we're returning to an earlier stack frame and
	// need to pair with the entersyscall() call made by cgocall, we must
	// save syscall* and let reentersyscall restore them.
	savedsp := unsafe.Pointer(gp.syscallsp)
	savedpc := gp.syscallpc
	exitsyscall() // coming out of cgo call
	gp.m.incgo = false

	osPreemptExtExit(gp.m)

	cgocallbackg1(fn, frame, ctxt) // will call unlockOSThread

	// At this point unlockOSThread has been called.
	// The following code must not change to a different m.
	// This is enforced by checking incgo in the schedule function.

	gp.m.incgo = true

	if gp.m != checkm {
		throw("m changed unexpectedly in cgocallbackg")
	}

	osPreemptExtEnter(gp.m)

	// going back to cgo call
	reentersyscall(savedpc, uintptr(savedsp))

	gp.m.syscall = syscall
}

func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()

	// When we return, undo the call to lockOSThread in cgocallbackg.
	// We must still stay on the same m.
	defer unlockOSThread()

	if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
		gp.m.needextram = false
		systemstack(newextram)
	}

	if ctxt != 0 {
		s := append(gp.cgoCtxt, ctxt)

		// Now we need to set gp.cgoCtxt = s, but we could get
		// a SIGPROF signal while manipulating the slice, and
		// the SIGPROF handler could pick up gp.cgoCtxt while
		// tracing up the stack.
		// We need to ensure that the handler always sees a valid
		// slice, so set the values in an order such that it
		// always does.
		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
		p.cap = cap(s)
		p.len = len(s)

		defer func(gp *g) {
			// Decrease the length of the slice by one, safely.
			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
			p.len--
		}(gp)
	}

	if gp.m.ncgo == 0 {
		// The C call to Go came from a thread not currently running
		// any Go. In the case of -buildmode=c-archive or c-shared,
		// this call may be coming in before package initialization
		// is complete. Wait until it is.
		<-main_init_done
	}

	// Check whether the profiler needs to be turned on or off; this route to
	// run Go code does not use runtime.execute, so bypasses the check there.
	hz := sched.profilehz
	if gp.m.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	// Add entry to defer stack in case of panic.
	restore := true
	defer unwindm(&restore)

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// Invoke callback. This function is generated by cmd/cgo and
	// will unpack the argument frame and call the Go function.
	var cb func(frame unsafe.Pointer)
	cbFV := funcval{uintptr(fn)}
	*(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
	cb(frame)

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	// Do not unwind m->g0->sched.sp.
	// Our caller, cgocallback, will do that.
	restore = false
}

func unwindm(restore *bool) {
	if *restore {
		// Restore sp saved by cgocallback during
		// unwind of g's stack (see comment at top of file).
		mp := acquirem()
		sched := &mp.g0.sched
		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))

		// Do the accounting that cgocall will not have a chance to do
		// during an unwind.
		//
		// In the case where a Go call originates from C, ncgo is 0
		// and there is no matching cgocall to end.
		if mp.ncgo > 0 {
			mp.incgo = false
			mp.ncgo--
			osPreemptExtExit(mp)
		}

		releasem(mp)
	}
}

// called from assembly
func badcgocallback() {
	throw("misaligned stack in cgocallback")
}

// called from (incomplete) assembly
func cgounimpl() {
	throw("cgo not implemented")
}

var racecgosync uint64 // represents possible synchronization in C code

// Pointer checking for cgo code.

// We want to detect all cases where a program that does not use
// unsafe makes a cgo call passing a Go pointer to memory that
// contains a Go pointer. Here a Go pointer is defined as a pointer
// to memory allocated by the Go runtime. Programs that use unsafe
// can evade this restriction easily, so we don't try to catch them.
// The cgo program will rewrite all possibly bad pointer arguments to
// call cgoCheckPointer, where we can catch cases of a Go pointer
// pointing to a Go pointer.

// Complicating matters, taking the address of a slice or array
// element permits the C program to access all elements of the slice
// or array. In that case we will see a pointer to a single element,
// but we need to check the entire data structure.
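
// For example, in the following hypothetical sketch (the C type item, the C
// function cFunc, and the Go function pass are invented for illustration and
// appear nowhere else), the call passes only the address of one element, yet
// the entire slice must be free of stored Go pointers:
//
//	/*
//	typedef struct { void *p; } item;
//	void cFunc(item *x);
//	*/
//	import "C"
//
//	func pass(s []C.item) {
//		// cgo rewrites this call so that the pointer check sees both
//		// &s[0] and the slice s, and therefore checks every element.
//		C.cFunc(&s[0])
//	}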

// The cgoCheckPointer call takes additional arguments indicating that
// it was called on an address expression. An additional argument of
// true means that it only needs to check a single element. An
// additional argument of a slice or array means that it needs to
// check the entire slice/array, but nothing else. Otherwise, the
// pointer could be anything, and we check the entire heap object,
// which is conservative but safe.

// When and if we implement a moving garbage collector,
// cgoCheckPointer will pin the pointer for the duration of the cgo
// call. (This is necessary but not sufficient; the cgo program will
// also have to change to pin Go pointers that cannot point to Go
// pointers.)

// cgoCheckPointer checks if the argument contains a Go pointer that
// points to a Go pointer, and panics if it does.
func cgoCheckPointer(ptr any, arg any) {
	if debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&ptr)
	t := ep._type

	top := true
	if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
		p := ep.data
		if t.kind&kindDirectIface == 0 {
			p = *(*unsafe.Pointer)(p)
		}
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		aep := efaceOf(&arg)
		switch aep._type.kind & kindMask {
		case kindBool:
			if t.kind&kindMask == kindUnsafePointer {
				// We don't know the type of the element.
				break
			}
			pt := (*ptrtype)(unsafe.Pointer(t))
			cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
			return
		case kindSlice:
			// Check the slice rather than the pointer.
			ep = aep
			t = ep._type
		case kindArray:
			// Check the array rather than the pointer.
			// Pass top as false since we have a pointer
			// to the array.
			ep = aep
			t = ep._type
			top = false
		default:
			throw("can't happen")
		}
	}

	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
}

const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
const cgoResultFail = "cgo result has Go pointer"

// cgoCheckArg is the real work of cgoCheckPointer. The argument p
// is either a pointer to the value (of type t), or the value itself,
// depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
	if t.ptrdata == 0 || p == nil {
		// If the type has no pointers there is nothing to do.
		return
	}

	switch t.kind & kindMask {
	default:
		throw("can't happen")
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		if !indir {
			if at.len != 1 {
				throw("can't happen")
			}
			cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
			return
		}
		for i := uintptr(0); i < at.len; i++ {
			cgoCheckArg(at.elem, p, true, top, msg)
			p = add(p, at.elem.size)
		}
	case kindChan, kindMap:
		// These types contain internal pointers that will
		// always be allocated in the Go heap. It's never OK
		// to pass them to C.
		panic(errorString(msg))
	case kindFunc:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		panic(errorString(msg))
	case kindInterface:
		it := *(**_type)(p)
		if it == nil {
			return
		}
		// A type known at compile time is OK since it's constant.
		// A type not known at compile time will be in the heap and
		// will not be OK.
		if inheap(uintptr(unsafe.Pointer(it))) {
			panic(errorString(msg))
		}
		p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
		cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		s := (*slice)(p)
		p = s.array
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
		if st.elem.ptrdata == 0 {
			return
		}
		for i := 0; i < s.cap; i++ {
			cgoCheckArg(st.elem, p, true, false, msg)
			p = add(p, st.elem.size)
		}
	case kindString:
		ss := (*stringStruct)(p)
		if !cgoIsGoPointer(ss.str) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		if !indir {
			if len(st.fields) != 1 {
				throw("can't happen")
			}
			cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
			return
		}
		for _, f := range st.fields {
			if f.typ.ptrdata == 0 {
				continue
			}
			cgoCheckArg(f.typ, add(p, f.offset()), true, top, msg)
		}
	case kindPtr, kindUnsafePointer:
		if indir {
			p = *(*unsafe.Pointer)(p)
			if p == nil {
				return
			}
		}

		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}

		cgoCheckUnknownPointer(p, msg)
	}
}

// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
// memory. It checks whether that Go memory contains any other
// pointer into Go memory. If it does, we panic.
// The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
	if inheap(uintptr(p)) {
		b, span, _ := findObject(uintptr(p), 0, 0)
		base = b
		if base == 0 {
			return
		}
		hbits := heapBitsForAddr(base)
		n := span.elemsize
		for i = uintptr(0); i < n; i += goarch.PtrSize {
			if !hbits.morePointers() {
				// No more possible pointers.
				break
			}
			if hbits.isPointer() && cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
				panic(errorString(msg))
			}
			hbits = hbits.next()
		}

		return
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			// We have no way to know the size of the object.
			// We have to assume that it might contain a pointer.
			panic(errorString(msg))
		}
		// In the text or noptr sections, we know that the
		// pointer does not point to a Go pointer.
	}

	return
}

// cgoIsGoPointer reports whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
	if p == nil {
		return false
	}

	if inHeapOrStack(uintptr(p)) {
		return true
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			return true
		}
	}

	return false
}

// cgoInRange reports whether p is between start and end.
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
	return start <= uintptr(p) && uintptr(p) < end
}

// cgoCheckResult is called to check the result parameter of an
// exported Go function. It panics if the result is or contains a Go
// pointer.
func cgoCheckResult(val any) {
	if debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&val)
	t := ep._type
	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
}
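
// As an illustration, cgo-generated code passes the value an exported Go
// function returns to C through cgoCheckResult. In a hypothetical sketch
// (node and Get are invented names, not used elsewhere in the runtime), the
// export below panics when called from C with cgocheck enabled, because its
// result is a pointer into Go-allocated memory:
//
//	type node struct{ next *node }
//
//	//export Get
//	func Get() *node {
//		return &node{} // caught by cgoCheckResult: result is a Go pointer
//	}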