Source file src/runtime/os_netbsd.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

const (
	_SS_DISABLE  = 4
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_NSIG        = 33
	_SI_USER     = 0

	// From NetBSD's <sys/ucontext.h>
	_UC_SIGMASK = 0x01
	_UC_CPU     = 0x04

	// From <sys/lwp.h>
	_LWP_DETACHED = 0x00000040
)

type mOS struct {
	waitsemacount uint32
}

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func sigaction(sig uint32, new, old *sigactiont)

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

func lwp_tramp()

func raiseproc(sig uint32)

func lwp_kill(tid int32, sig int)

//go:noescape
func getcontext(ctxt unsafe.Pointer)

//go:noescape
func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32

//go:noescape
func lwp_park(clockid, flags int32, ts *timespec, unpark int32, hint, unparkhint unsafe.Pointer) int32

//go:noescape
func lwp_unpark(lwp int32, hint unsafe.Pointer) int32

func lwp_self() int32

func osyield()

//go:nosplit
func osyield_no_g() {
	osyield()
}

func kqueue() int32

//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
func setNonblock(fd int32)

const (
	_ESRCH     = 3
	_ETIMEDOUT = 60

	// From NetBSD's <sys/time.h>
	_CLOCK_REALTIME  = 0
	_CLOCK_VIRTUAL   = 1
	_CLOCK_PROF      = 2
	_CLOCK_MONOTONIC = 3

	_TIMER_RELTIME = 0
	_TIMER_ABSTIME = 1
)

var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}

// From NetBSD's <sys/sysctl.h>
const (
	_CTL_KERN   = 1
	_KERN_OSREV = 3

	_CTL_HW        = 6
	_HW_NCPU       = 3
	_HW_PAGESIZE   = 7
	_HW_NCPUONLINE = 16
)

func sysctlInt(mib []uint32) (int32, bool) {
	var out int32
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], uint32(len(mib)), (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret < 0 {
		return 0, false
	}
	return out, true
}

func getncpu() int32 {
	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPUONLINE}); ok {
		return int32(n)
	}
	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPU}); ok {
		return int32(n)
	}
	return 1
}

func getPageSize() uintptr {
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 {
		return uintptr(out)
	}
	return 0
}

func getOSRev() int {
	if osrev, ok := sysctlInt([]uint32{_CTL_KERN, _KERN_OSREV}); ok {
		return int(osrev)
	}
	return 0
}
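
// The helpers above issue raw sysctl(2) calls because the runtime cannot
// import packages outside itself. Ordinary Go code can read the same NetBSD
// sysctl nodes through golang.org/x/sys/unix; a minimal sketch, assuming that
// module is available (the string names below are the usual spellings of the
// _CTL_HW/_HW_NCPUONLINE, _HW_NCPU, and _HW_PAGESIZE nodes):
//
//	package main
//
//	import (
//		"fmt"
//
//		"golang.org/x/sys/unix"
//	)
//
//	func main() {
//		ncpu, err := unix.SysctlUint32("hw.ncpuonline")
//		if err != nil {
//			// Fall back the same way getncpu does.
//			ncpu, _ = unix.SysctlUint32("hw.ncpu")
//		}
//		pagesize, _ := unix.SysctlUint32("hw.pagesize")
//		fmt.Println("ncpu:", ncpu, "page size:", pagesize)
//	}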

//go:nosplit
func semacreate(mp *m) {
}

//go:nosplit
func semasleep(ns int64) int32 {
	_g_ := getg()
	var deadline int64
	if ns >= 0 {
		deadline = nanotime() + ns
	}

	for {
		v := atomic.Load(&_g_.m.waitsemacount)
		if v > 0 {
			if atomic.Cas(&_g_.m.waitsemacount, v, v-1) {
				return 0 // semaphore acquired
			}
			continue
		}

		// Sleep until unparked by semawakeup or timeout.
		var tsp *timespec
		var ts timespec
		if ns >= 0 {
			wait := deadline - nanotime()
			if wait <= 0 {
				return -1
			}
			ts.setNsec(wait)
			tsp = &ts
		}
		ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&_g_.m.waitsemacount), nil)
		if ret == _ETIMEDOUT {
			return -1
		}
	}
}

//go:nosplit
func semawakeup(mp *m) {
	atomic.Xadd(&mp.waitsemacount, 1)
	// From NetBSD's _lwp_unpark(2) manual:
	// "If the target LWP is not currently waiting, it will return
	// immediately upon the next call to _lwp_park()."
	ret := lwp_unpark(int32(mp.procid), unsafe.Pointer(&mp.waitsemacount))
	if ret != 0 && ret != _ESRCH {
		// semawakeup can be called on signal stack.
		systemstack(func() {
			print("thrwakeup addr=", &mp.waitsemacount, " sem=", mp.waitsemacount, " ret=", ret, "\n")
		})
	}
}
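
// semasleep and semawakeup build a counting semaphore from an atomic counter
// plus _lwp_park/_lwp_unpark: waiters decrement the count with a CAS loop and
// only park after observing a zero count, and per the _lwp_unpark note above
// a wakeup sent before the park still takes effect. A portable sketch of the
// same protocol using sync/atomic (Go 1.19+) and a one-slot channel in place
// of the park/unpark syscalls (an illustration, not the runtime's mechanism):
//
//	type sema struct {
//		count atomic.Uint32
//		park  chan struct{} // created with make(chan struct{}, 1)
//	}
//
//	func (s *sema) acquire() {
//		for {
//			if v := s.count.Load(); v > 0 {
//				if s.count.CompareAndSwap(v, v-1) {
//					return // like semasleep returning 0
//				}
//				continue // lost the race; retry
//			}
//			<-s.park // like lwp_park: sleep until released
//		}
//	}
//
//	func (s *sema) release() {
//		s.count.Add(1)
//		select {
//		case s.park <- struct{}{}: // like lwp_unpark: wake a sleeper
//		default: // a token is already pending; the count is visible anyway
//		}
//	}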

// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	var uc ucontextt
	getcontext(unsafe.Pointer(&uc))

	// _UC_SIGMASK does not seem to work here.
	// It would be nice if _UC_SIGMASK and _UC_STACK
	// worked so that we could do all the work setting
	// the sigmask and the stack here, instead of setting
	// the mask here and the stack in netbsdMstart.
	// For now do the blocking manually.
	uc.uc_flags = _UC_SIGMASK | _UC_CPU
	uc.uc_link = nil
	uc.uc_sigmask = sigset_all

	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)

	lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))

	ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if ret < 0 {
		print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
		if ret == -_EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -p)")
		}
		throw("runtime.newosproc")
	}
}

// netbsdMstart is the entry point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls netbsdMstart0.
func netbsdMstart()

// netbsdMstart0 is the function call that starts executing a newly
// created thread. On NetBSD, a new thread inherits the signal stack
// of the creating thread. That confuses minit, so we remove that
// signal stack here before calling the regular mstart. It's a bit
// baroque to remove a signal stack here only to add one in minit, but
// it's a simple change that keeps NetBSD working like other OSes.
// At this point all signals are blocked, so there is no race.
//go:nosplit
func netbsdMstart0() {
	st := stackt{ss_flags: _SS_DISABLE}
	sigaltstack(&st, nil)
	mstart0()
}

func osinit() {
	ncpu = getncpu()
	if physPageSize == 0 {
		physPageSize = getPageSize()
	}
	needSysmonWorkaround = getOSRev() < 902000000 // NetBSD 9.2
}

var urandom_dev = []byte("/dev/urandom\x00")

//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}
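
// getRandomData only seeds the runtime's internal hashing; it is not a
// general-purpose randomness API. User code should use crypto/rand, which is
// backed by the operating system's random source. A minimal sketch:
//
//	package main
//
//	import (
//		"crypto/rand"
//		"fmt"
//		"log"
//	)
//
//	func main() {
//		buf := make([]byte, 32)
//		if _, err := rand.Read(buf); err != nil {
//			log.Fatal(err)
//		}
//		fmt.Printf("%x\n", buf)
//	}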

func goenvs() {
	goenvs_unix()
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	_g_ := getg()
	_g_.m.procid = uint64(lwp_self())

	// On NetBSD a thread created by pthread_create inherits the
	// signal stack of the creating thread. We always create a
	// new signal stack here, to avoid having two Go threads using
	// the same signal stack. This breaks the case of a thread
	// created in C that calls sigaltstack and then calls a Go
	// function, because we will lose track of the C code's
	// sigaltstack, but it's the best we can do.
	signalstack(&_g_.m.gsignal.stack)
	_g_.m.newSigstack = true

	minitSignalMask()
}

// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
	unminitSignals()
}

// Called from exitm, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m) {
}

func sigtramp()

type sigactiont struct {
	sa_sigaction uintptr
	sa_mask      sigset
	sa_flags     int32
}

//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = sigset_all
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		fn = abi.FuncPCABI0(sigtramp)
	}
	sa.sa_sigaction = fn
	sigaction(i, &sa, nil)
}

//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	throw("setsigstack")
}

//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	sigaction(i, nil, &sa)
	return sa.sa_sigaction
}
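
// setsig and getsig manipulate raw signal dispositions for the runtime itself,
// installing the assembly trampoline sigtramp in front of the Go handler. User
// code does not call sigaction directly; it observes signals through
// os/signal, which delivers them from the runtime's handler onto channels. A
// minimal sketch:
//
//	package main
//
//	import (
//		"fmt"
//		"os"
//		"os/signal"
//		"syscall"
//	)
//
//	func main() {
//		c := make(chan os.Signal, 1)
//		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
//		fmt.Println("received", <-c)
//	}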

// setSignalstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	s.ss_sp = sp
}

//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}

func sigdelset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}
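
// sigaddset and sigdelset treat the sigset as an array of 32-bit words:
// signal i lives in word (i-1)/32 at bit (i-1)%32, so signal 1 is word 0,
// bit 0, and signal 33 would be word 1, bit 0. A worked instance of the same
// arithmetic (illustration only):
//
//	i := 32
//	word, bit := (i-1)/32, uint32(i-1)&31 // word 0, bit 31
//	mask.__bits[word] |= 1 << bit         // equivalent to sigaddset(mask, 32)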

//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}

func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}

func setThreadCPUProfiler(hz int32) {
	setThreadCPUProfilerHz(hz)
}

//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	return true
}

func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
	sysauxv(auxv[:])
}

const (
	_AT_NULL   = 0 // Terminates the vector
	_AT_PAGESZ = 6 // Page size in bytes
)

func sysauxv(auxv []uintptr) {
	for i := 0; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		switch tag {
		case _AT_PAGESZ:
			physPageSize = val
		}
	}
}
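
// sysargs can walk past argv and the environment (each terminated by a NULL
// entry) because the kernel places the ELF auxiliary vector directly after
// envp: a sequence of (tag, value) pairs ended by an _AT_NULL tag, which
// sysauxv then scans. A self-contained toy of the same scan over a fabricated
// vector (illustration only; the real vector comes from the kernel):
//
//	vec := []uintptr{
//		_AT_PAGESZ, 4096, // tag 6: page size in bytes
//		_AT_NULL, 0,      // tag 0: terminates the vector
//	}
//	for i := 0; vec[i] != _AT_NULL; i += 2 {
//		tag, val := vec[i], vec[i+1]
//		if tag == _AT_PAGESZ {
//			println("page size:", uint64(val))
//		}
//	}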

// raise sends a signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
	lwp_kill(lwp_self(), int(sig))
}

func signalM(mp *m, sig int) {
	lwp_kill(int32(mp.procid), sig)
}

// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31

//go:nosplit
func runPerThreadSyscall() {
	throw("runPerThreadSyscall only valid on linux")
}