Source file src/runtime/panic.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/goarch"
     9  	"runtime/internal/atomic"
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
    14  // We have two different ways of doing defers. The older way involves creating a
    15  // defer record at the time that a defer statement is executing and adding it to a
    16  // defer chain. This chain is inspected by the deferreturn call at all function
    17  // exits in order to run the appropriate defer calls. A cheaper way (which we call
    18  // open-coded defers) is used for functions in which no defer statements occur in
    19  // loops. In that case, we simply store the defer function/arg information into
    20  // specific stack slots at the point of each defer statement, as well as setting a
    21  // bit in a bitmask. At each function exit, we add inline code to directly make
    22  // the appropriate defer calls based on the bitmask and fn/arg information stored
    23  // on the stack. During panic/Goexit processing, the appropriate defer calls are
    24  // made using extra funcdata info that indicates the exact stack slots that
    25  // contain the bitmask and defer fn/args.
    26  
    27  // Check to make sure we can really generate a panic. If the panic
    28  // was generated from the runtime, or from inside malloc, then convert
    29  // to a throw of msg.
    30  // pc should be the program counter of the compiler-generated code that
    31  // triggered this panic.
    32  func panicCheck1(pc uintptr, msg string) {
    33  	if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
    34  		// Note: wasm can't tail call, so we can't get the original caller's pc.
    35  		throw(msg)
    36  	}
    37  	// TODO: is this redundant? How could we be in malloc
    38  	// but not in the runtime? runtime/internal/*, maybe?
    39  	gp := getg()
    40  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    41  		throw(msg)
    42  	}
    43  }
    44  
    45  // Same as above, but calling from the runtime is allowed.
    46  //
    47  // Using this function is necessary for any panic that may be
    48  // generated by runtime.sigpanic, since those are always called by the
    49  // runtime.
    50  func panicCheck2(err string) {
    51  	// panic allocates, so to avoid recursive malloc, turn panics
    52  	// during malloc into throws.
    53  	gp := getg()
    54  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    55  		throw(err)
    56  	}
    57  }
    58  
    59  // Many of the following panic entry-points turn into throws when they
    60  // happen in various runtime contexts. These should never happen in
    61  // the runtime, and if they do, they indicate a serious issue and
    62  // should not be caught by user code.
    63  //
    64  // The panic{Index,Slice,divide,shift} functions are called by
    65  // code generated by the compiler for out of bounds index expressions,
    66  // out of bounds slice expressions, division by zero, and shift by negative.
    67  // The panicdivide (again), panicoverflow, panicfloat, and panicmem
    68  // functions are called by the signal handler when a signal occurs
    69  // indicating the respective problem.
    70  //
    71  // Since panic{Index,Slice,shift} are never called directly, and
    72  // since the runtime package should never have an out of bounds slice
    73  // or array reference or negative shift, if we see those functions called from the
    74  // runtime package we turn the panic into a throw. That will dump the
    75  // entire runtime stack for easier debugging.
    76  //
    77  // The entry points called by the signal handler will be called from
    78  // runtime.sigpanic, so we can't disallow calls from the runtime to
    79  // these (they always look like they're called from the runtime).
    80  // Hence, for these, we just check for clearly bad runtime conditions.
    81  //
    82  // The panic{Index,Slice} functions are implemented in assembly and tail call
    83  // to the goPanic{Index,Slice} functions below. This is done so we can use
    84  // a space-minimal register calling convention.
    85  
    86  // failures in the comparisons for s[x], 0 <= x < y (y == len(s))
    87  func goPanicIndex(x int, y int) {
    88  	panicCheck1(getcallerpc(), "index out of range")
    89  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
    90  }
    91  func goPanicIndexU(x uint, y int) {
    92  	panicCheck1(getcallerpc(), "index out of range")
    93  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
    94  }
    95  
    96  // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
    97  func goPanicSliceAlen(x int, y int) {
    98  	panicCheck1(getcallerpc(), "slice bounds out of range")
    99  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
   100  }
   101  func goPanicSliceAlenU(x uint, y int) {
   102  	panicCheck1(getcallerpc(), "slice bounds out of range")
   103  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
   104  }
   105  func goPanicSliceAcap(x int, y int) {
   106  	panicCheck1(getcallerpc(), "slice bounds out of range")
   107  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
   108  }
   109  func goPanicSliceAcapU(x uint, y int) {
   110  	panicCheck1(getcallerpc(), "slice bounds out of range")
   111  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
   112  }
   113  
   114  // failures in the comparisons for s[x:y], 0 <= x <= y
   115  func goPanicSliceB(x int, y int) {
   116  	panicCheck1(getcallerpc(), "slice bounds out of range")
   117  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
   118  }
   119  func goPanicSliceBU(x uint, y int) {
   120  	panicCheck1(getcallerpc(), "slice bounds out of range")
   121  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
   122  }
   123  
   124  // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
   125  func goPanicSlice3Alen(x int, y int) {
   126  	panicCheck1(getcallerpc(), "slice bounds out of range")
   127  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
   128  }
   129  func goPanicSlice3AlenU(x uint, y int) {
   130  	panicCheck1(getcallerpc(), "slice bounds out of range")
   131  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
   132  }
   133  func goPanicSlice3Acap(x int, y int) {
   134  	panicCheck1(getcallerpc(), "slice bounds out of range")
   135  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
   136  }
   137  func goPanicSlice3AcapU(x uint, y int) {
   138  	panicCheck1(getcallerpc(), "slice bounds out of range")
   139  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
   140  }
   141  
   142  // failures in the comparisons for s[:x:y], 0 <= x <= y
   143  func goPanicSlice3B(x int, y int) {
   144  	panicCheck1(getcallerpc(), "slice bounds out of range")
   145  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
   146  }
   147  func goPanicSlice3BU(x uint, y int) {
   148  	panicCheck1(getcallerpc(), "slice bounds out of range")
   149  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
   150  }
   151  
   152  // failures in the comparisons for s[x:y:], 0 <= x <= y
   153  func goPanicSlice3C(x int, y int) {
   154  	panicCheck1(getcallerpc(), "slice bounds out of range")
   155  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
   156  }
   157  func goPanicSlice3CU(x uint, y int) {
   158  	panicCheck1(getcallerpc(), "slice bounds out of range")
   159  	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
   160  }
   161  
   162  // failures in the conversion (*[x]T)s, 0 <= x <= y, x == cap(s)
   163  func goPanicSliceConvert(x int, y int) {
   164  	panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
   165  	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
   166  }
   167  
// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
// Each stub tail calls the corresponding goPanic* function above
// (see the header comment about the space-minimal register calling
// convention).
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)
   187  
// shiftError is the cached error value reported for a negative shift amount.
var shiftError = error(errorString("negative shift amount"))

// panicshift is called by compiler-generated code for a shift by a
// negative amount. panicCheck1 converts it to a throw if it was
// triggered from within the runtime.
func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}
   194  
// divideError is the cached error value for integer division by zero.
var divideError = error(errorString("integer divide by zero"))

// panicdivide is called by compiler-generated code and by the signal
// handler (via runtime.sigpanic), so it uses panicCheck2, which
// permits calls from the runtime.
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}
   201  
// overflowError is the cached error value for integer overflow.
var overflowError = error(errorString("integer overflow"))

// panicoverflow is called by the signal handler on a signal indicating
// integer overflow; panicCheck2 permits the call from the runtime.
func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}
   208  
// floatError is the cached error value for floating point faults.
var floatError = error(errorString("floating point error"))

// panicfloat is called by the signal handler on a signal indicating a
// floating point error; panicCheck2 permits the call from the runtime.
func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}
   215  
// memoryError is the cached error value for invalid memory accesses.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called by the signal handler on a memory fault;
// panicCheck2 permits the call from the runtime.
func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}
   222  
// panicmemAddr is like panicmem, but the error value it panics with
// also carries the faulting address.
func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
   227  
// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// Allocate a heap defer record and push it onto the front of
	// this goroutine's defer chain.
	d := newdefer()
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   260  
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.started = false
	d.heap = false // record lives on the stack; freedefer will not pool it
	d.openDefer = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	d.framepc = 0
	d.varp = 0
	// The lines below implement:
	//   d.panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.fd)) = 0
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	// Same return protocol as deferproc: return 0 normally, 1 when a
	// deferred func stops a panic (see the comment in deferproc).
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   303  
// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.  The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		// Local pool is empty: refill it to half capacity from the
		// global pool, under the global defer lock.
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		// Pop the most recently freed record (LIFO) from the local pool.
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	// mp/pp must not be used past releasem; clear the locals.
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}
   338  
// Free the given defer.
// The defer cannot be used after this call.
//
// This is nosplit because the incoming defer is in a perilous state.
// It's not on any defer list, so stack copying won't adjust stack
// pointers in it (namely, d.link). Hence, if we were to copy the
// stack, d could then contain a stale pointer.
//
//go:nosplit
func freedefer(d *_defer) {
	d.link = nil
	// After this point we can copy the stack.

	// _panic and fn must have been cleared by the caller; these
	// helpers throw (they are separate functions so freedefer itself
	// stays nosplit-small).
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		// Stack-allocated records (deferprocStack) are not pooled.
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			// Thread the popped records into a singly linked list
			// (first..last) to splice into the global pool in one shot.
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	// Zero the record so no stale pointers are kept alive while it
	// sits in the pool.
	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	// mp/pp must not be used past releasem; clear the locals.
	mp, pp = nil, nil
}
   392  
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
// Reached only from freedefer's error path.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   399  
// freedeferfn is the counterpart of freedeferpanic for a non-nil fn;
// it is likewise a separate function so freedefer can stay nosplit.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   404  
// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			return
		}
		sp := getcallersp()
		if d.sp != sp {
			// The head of the defer chain belongs to a different
			// frame; nothing left to run for the caller's frame.
			return
		}
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				throw("unfinished open-coded defers in deferreturn")
			}
			gp._defer = d.link
			freedefer(d)
			// If this frame uses open defers, then this
			// must be the only defer record for the
			// frame, so we can just return.
			return
		}

		// Unlink and free the record before calling fn; fn itself may
		// defer or panic.
		fn := d.fn
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		fn()
	}
}
   439  
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// The defer was started by an earlier panic; mark that
			// panic aborted, and (for non-open-coded defers) remove
			// the record. Open-coded records stay, since the frame
			// may still contain defers to run.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			if !d.openDefer {
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}
		d.started = true
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				// We should always run all defers in the frame,
				// since there is no panic associated with this
				// defer that can be recovered.
				throw("unfinished open-coded defers in Goexit")
			}
			if p.aborted {
				// Since our current defer caused a panic and may
				// have been already freed, just restart scanning
				// for open-coded defers from this frame again.
				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
			} else {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			// Save the pc/sp in deferCallSave(), so we can "recover" back to this
			// loop if necessary.
			deferCallSave(&p, d.fn)
		}
		if p.aborted {
			// We had a recursive panic in the defer d we started, and
			// then did a recover in a defer that was further down the
			// defer chain than d. In the case of an outstanding Goexit,
			// we force the recover to return back to this loop. d will
			// have already been freed if completed, so just continue
			// immediately to the next defer on the chain.
			p.aborted = false
			continue
		}
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   523  
   524  // Call all Error and String methods before freezing the world.
   525  // Used when crashing with panicking.
   526  func preprintpanics(p *_panic) {
   527  	defer func() {
   528  		if recover() != nil {
   529  			throw("panic while printing panic value")
   530  		}
   531  	}()
   532  	for p != nil {
   533  		switch v := p.arg.(type) {
   534  		case error:
   535  			p.arg = v.Error()
   536  		case stringer:
   537  			p.arg = v.String()
   538  		}
   539  		p = p.link
   540  	}
   541  }
   542  
   543  // Print all currently active panics. Used when crashing.
   544  // Should only be called after preprintpanics.
   545  func printpanics(p *_panic) {
   546  	if p.link != nil {
   547  		printpanics(p.link)
   548  		if !p.link.goexit {
   549  			print("\t")
   550  		}
   551  	}
   552  	if p.goexit {
   553  		return
   554  	}
   555  	print("panic: ")
   556  	printany(p.arg)
   557  	if p.recovered {
   558  		print(" [recovered]")
   559  	}
   560  	print("\n")
   561  }
   562  
// addOneOpenDeferFrame scans the stack (in gentraceback order, from inner frames to
// outer frames) for the first frame (if any) with open-coded defers. If it finds
// one, it adds a single entry to the defer chain for that frame. The entry added
// represents all the defers in the associated open defer frame, and is sorted in
// order with respect to any non-open-coded defers.
//
// addOneOpenDeferFrame stops (possibly without adding a new entry) if it encounters
// an in-progress open defer entry. An in-progress open defer entry means there has
// been a new panic because of a defer in the associated frame. addOneOpenDeferFrame
// does not add an open defer entry past a started entry, because that started entry
// still needs to be finished, and addOneOpenDeferFrame will be called when that started
// entry is completed. The defer removal loop in gopanic() similarly stops at an
// in-progress defer entry. Together, addOneOpenDeferFrame and the defer removal loop
// ensure the invariant that there is no open defer entry further up the stack than
// an in-progress defer, and also that the defer removal loop is guaranteed to remove
// all not-in-progress open defer entries from the defer chain.
//
// If sp is non-nil, addOneOpenDeferFrame starts the stack scan from the frame
// specified by sp. If sp is nil, it uses the sp from the current defer record (which
// has just been finished). Hence, it continues the stack scan from the frame of the
// defer that just finished. It skips any frame that already has a (not-in-progress)
// open-coded _defer record in the defer chain.
//
// Note: All entries of the defer chain (including this new open-coded entry) have
// their pointers (including sp) adjusted properly if the stack moves while
// running deferred functions. Also, it is safe to pass in the sp arg (which is
// the direct result of calling getcallersp()), because all pointer variables
// (including arguments) are adjusted as needed during stack copies.
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
	var prevDefer *_defer
	if sp == nil {
		// Resume the scan from the frame of the just-finished defer.
		prevDefer = gp._defer
		pc = prevDefer.framepc
		sp = unsafe.Pointer(prevDefer.sp)
	}
	systemstack(func() {
		gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
			func(frame *stkframe, unused unsafe.Pointer) bool {
				if prevDefer != nil && prevDefer.sp == frame.sp {
					// Skip the frame for the previous defer that
					// we just finished (and was used to set
					// where we restarted the stack scan)
					return true
				}
				f := frame.fn
				fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
				if fd == nil {
					// Frame has no open-coded defers; keep scanning.
					return true
				}
				// Insert the open defer record in the
				// chain, in order sorted by sp.
				d := gp._defer
				var prev *_defer
				for d != nil {
					dsp := d.sp
					if frame.sp < dsp {
						break
					}
					if frame.sp == dsp {
						// This frame already has a record in the chain.
						if !d.openDefer {
							throw("duplicated defer entry")
						}
						// Don't add any record past an
						// in-progress defer entry. We don't
						// need it, and more importantly, we
						// want to keep the invariant that
						// there is no open defer entry
						// past an in-progress entry (see
						// header comment).
						if d.started {
							return false
						}
						return true
					}
					prev = d
					d = d.link
				}
				if frame.fn.deferreturn == 0 {
					throw("missing deferreturn")
				}

				d1 := newdefer()
				d1.openDefer = true
				d1._panic = nil
				// These are the pc/sp to set after we've
				// run a defer in this frame that did a
				// recover. We return to a special
				// deferreturn that runs any remaining
				// defers and then returns from the
				// function.
				d1.pc = frame.fn.entry() + uintptr(frame.fn.deferreturn)
				d1.varp = frame.varp
				d1.fd = fd
				// Save the SP/PC associated with current frame,
				// so we can continue stack trace later if needed.
				d1.framepc = frame.pc
				d1.sp = frame.sp
				d1.link = d
				if prev == nil {
					gp._defer = d1
				} else {
					prev.link = d1
				}
				// Stop stack scanning after adding one open defer record
				return false
			},
			nil, 0)
	})
}
   672  
   673  // readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
   674  // uint32 and a pointer to the byte following the varint.
   675  //
   676  // There is a similar function runtime.readvarint, which takes a slice of bytes,
   677  // rather than an unsafe pointer. These functions are duplicated, because one of
   678  // the two use cases for the functions would get slower if the functions were
   679  // combined.
   680  func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
   681  	var r uint32
   682  	var shift int
   683  	for {
   684  		b := *(*uint8)((unsafe.Pointer(fd)))
   685  		fd = add(fd, unsafe.Sizeof(b))
   686  		if b < 128 {
   687  			return r + uint32(b)<<shift, fd
   688  		}
   689  		r += ((uint32(b) &^ 128) << shift)
   690  		shift += 7
   691  		if shift > 28 {
   692  			panic("Bad varint")
   693  		}
   694  	}
   695  }
   696  
// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(gp *g, d *_defer) bool {
	done := true
	fd := d.fd

	// Funcdata layout (all varint-encoded): deferBitsOffset, nDefers,
	// then one closureOffset per defer. Offsets are subtracted from
	// d.varp to locate the stack slots.
	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))

	// Iterate highest bit first, so defers run in reverse order of
	// their defer statements (normal LIFO defer semantics).
	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var closureOffset uint32
		closureOffset, fd = readvarintUnsafe(fd)
		if deferBits&(1<<i) == 0 {
			// Bit not set: this defer statement was never executed.
			continue
		}
		closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
		d.fn = closure
		// Clear this defer's bit in the stack slot before the call,
		// so the same defer cannot run a second time.
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
		p := d._panic
		// Call the defer. Note that this can change d.varp if
		// the stack moves.
		deferCallSave(p, d.fn)
		if p != nil && p.aborted {
			break
		}
		d.fn = nil
		if d._panic != nil && d._panic.recovered {
			// A recover succeeded; the frame is finished only if no
			// active defer bits remain.
			done = deferBits == 0
			break
		}
	}

	return done
}
   736  
// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
// bypassed by a successful recover.
//
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
	if p != nil {
		// Record state a recover during fn may need: the argument
		// pointer and this call site's pc/sp.
		p.argp = unsafe.Pointer(getargp())
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	fn()
	if p != nil {
		// The saved pc/sp are only meaningful while fn is running;
		// clear them once it returns.
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}
   756  
// The implementation of the predeclared function panic.
//
// gopanic runs the goroutine's deferred calls in LIFO order. If one of
// them recovers, control transfers back to the recovering frame via
// mcall(recovery); if the defer chain is exhausted without a recover,
// the panic messages are printed and the process dies via fatalpanic.
func gopanic(e any) {
	gp := getg()
	// Panics can only be processed on a user goroutine with no runtime
	// locks held; in any other state we cannot safely run arbitrary
	// deferred user code, so convert to an unrecoverable throw.
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new _panic record (stack-allocated) onto the front of the
	// goroutine's panic list. noescape hides &p from escape analysis;
	// p never outlives this frame.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// By calculating getcallerpc/getcallersp here, we avoid scanning the
	// gopanic frame (stack scanning is slow...)
	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
		// earlier Goexit does continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			if !d.openDefer {
				// For open-coded defers, we need to process the
				// defer again, in case there are any other defers
				// to call in the frame (not including the defer
				// call that caused the panic). Non-open-coded
				// defers are fully consumed, so free them here.
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// done reports whether this defer entry is fully processed;
		// an open-coded defer frame may have more defers left to run
		// via deferreturn after a recover.
		done := true
		if d.openDefer {
			done = runOpenDeferFrame(gp, d)
			if done && !d._panic.recovered {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			p.argp = unsafe.Pointer(getargp())
			d.fn()
		}
		p.argp = nil

		// Deferred function did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		if done {
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
		}
		if p.recovered {
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A normal recover would bypass/abort the Goexit.  Instead,
				// we return to the processing loop of the Goexit.
				gp.sigcode0 = uintptr(gp._panic.sp)
				gp.sigcode1 = uintptr(gp._panic.pc)
				mcall(recovery)
				throw("bypassed recovery failed") // mcall should not return
			}
			atomic.Xadd(&runningPanicDefers, -1)

			// After a recover, remove any remaining non-started,
			// open-coded defer entries, since the corresponding defers
			// will be executed normally (inline). Any such entry will
			// become stale once we run the corresponding defers inline
			// and exit the associated stack frame. We only remove up to
			// the first started (in-progress) open defer entry, not
			// including the current frame, since any higher entries will
			// be from a higher panic in progress, and will still be
			// needed.
			d := gp._defer
			var prev *_defer
			if !done {
				// Skip our current frame, if not done. It is
				// needed to complete any remaining defers in
				// deferreturn()
				prev = d
				d = d.link
			}
			for d != nil {
				if d.started {
					// This defer is started but we
					// are in the middle of a
					// defer-panic-recover inside of
					// it, so don't remove it or any
					// further defer entries
					break
				}
				if d.openDefer {
					if prev == nil {
						gp._defer = d.link
					} else {
						prev.link = d.link
					}
					newd := d.link
					freedefer(d)
					d = newd
				} else {
					prev = d
					d = d.link
				}
			}

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}
   944  
// getargp returns the location where the caller
// writes outgoing function call arguments.
//
// It is nosplit and noinline so that getcallersp reliably observes
// the frame of getargp's actual caller (gopanic records this in
// p.argp so gorecover can later verify who may recover).
//go:nosplit
//go:noinline
func getargp() uintptr {
	return getcallersp() + sys.MinFrameSize
}
   952  
   953  // The implementation of the predeclared function recover.
   954  // Cannot split the stack because it needs to reliably
   955  // find the stack segment of its caller.
   956  //
   957  // TODO(rsc): Once we commit to CopyStackAlways,
   958  // this doesn't need to be nosplit.
   959  //go:nosplit
   960  func gorecover(argp uintptr) any {
   961  	// Must be in a function running as part of a deferred call during the panic.
   962  	// Must be called from the topmost function of the call
   963  	// (the function used in the defer statement).
   964  	// p.argp is the argument pointer of that topmost deferred function call.
   965  	// Compare against argp reported by caller.
   966  	// If they match, the caller is the one who can recover.
   967  	gp := getg()
   968  	p := gp._panic
   969  	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
   970  		p.recovered = true
   971  		return p.arg
   972  	}
   973  	return nil
   974  }
   975  
// sync_throw is linked into package sync so it can report fatal
// errors via the runtime's throw without importing runtime internals.
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
   980  
// throw reports a fatal runtime error and terminates the process via
// fatalthrow; unlike a panic, a throw runs no deferred functions and
// cannot be recovered.
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	// Record that this M is throwing (dopanic_m consults m.throwing
	// when deciding whether to print the runtime stack), but don't
	// clobber a value set by an earlier throw.
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}
   995  
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically
// (in gopanic and fatalpanic).
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically
// (in startpanic_m and dopanic_m).
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
  1008  
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// recovery is reached via mcall from gopanic, which stashes the
// sp/pc of the frame to resume in gp.sigcode0/gp.sigcode1.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}
  1032  
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	// Capture the caller's pc/sp before switching stacks so the
	// traceback starts at the throw site rather than here.
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
  1059  
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	// Capture the caller's pc/sp before switching stacks so the
	// traceback starts at the panic site.
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
  1101  
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	// m.dying counts how many times this M has entered the death
	// path; each re-entry means the previous attempt itself failed,
	// so progressively less work is attempted.
	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
  1161  
// didothers records whether tracebackothers has already run during
// this crash, so other goroutines' stacks are dumped at most once.
var didothers bool

// deadlock is intentionally locked twice by a losing M in dopanic_m
// to block it forever while another panicking M finishes printing.
var deadlock mutex
  1164  
// dopanic_m prints the crash report for gp — signal details (if the
// panic originated from a signal) and stack traces as permitted by
// the GOTRACEBACK level — then reports whether the caller should
// crash (per gotraceback) rather than just exit.
//
// pc and sp identify the frame where the traceback should start.
// Callers must hold paniclk (taken in startpanic_m); it is released here.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}
  1210  
  1211  // canpanic returns false if a signal should throw instead of
  1212  // panicking.
  1213  //
  1214  //go:nosplit
  1215  func canpanic(gp *g) bool {
  1216  	// Note that g is m->gsignal, different from gp.
  1217  	// Note also that g->m can change at preemption, so m can go stale
  1218  	// if this function ever makes a function call.
  1219  	_g_ := getg()
  1220  	mp := _g_.m
  1221  
  1222  	// Is it okay for gp to panic instead of crashing the program?
  1223  	// Yes, as long as it is running Go code, not runtime code,
  1224  	// and not stuck in a system call.
  1225  	if gp == nil || gp != mp.curg {
  1226  		return false
  1227  	}
  1228  	if mp.locks != 0 || mp.mallocing != 0 || mp.throwing != 0 || mp.preemptoff != "" || mp.dying != 0 {
  1229  		return false
  1230  	}
  1231  	status := readgstatus(gp)
  1232  	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
  1233  		return false
  1234  	}
  1235  	if GOOS == "windows" && mp.libcallsp != 0 {
  1236  		return false
  1237  	}
  1238  	return true
  1239  }
  1240  
  1241  // shouldPushSigpanic reports whether pc should be used as sigpanic's
  1242  // return PC (pushing a frame for the call). Otherwise, it should be
  1243  // left alone so that LR is used as sigpanic's return PC, effectively
  1244  // replacing the top-most frame with sigpanic. This is used by
  1245  // preparePanic.
  1246  func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
  1247  	if pc == 0 {
  1248  		// Probably a call to a nil func. The old LR is more
  1249  		// useful in the stack trace. Not pushing the frame
  1250  		// will make the trace look like a call to sigpanic
  1251  		// instead. (Otherwise the trace will end at sigpanic
  1252  		// and we won't get to see who faulted.)
  1253  		return false
  1254  	}
  1255  	// If we don't recognize the PC as code, but we do recognize
  1256  	// the link register as code, then this assumes the panic was
  1257  	// caused by a call to non-code. In this case, we want to
  1258  	// ignore this call to make unwinding show the context.
  1259  	//
  1260  	// If we running C code, we're not going to recognize pc as a
  1261  	// Go function, so just assume it's good. Otherwise, traceback
  1262  	// may try to read a stale LR that looks like a Go code
  1263  	// pointer and wander into the woods.
  1264  	if gp.m.incgo || findfunc(pc).valid() {
  1265  		// This wasn't a bad call, so use PC as sigpanic's
  1266  		// return PC.
  1267  		return true
  1268  	}
  1269  	if findfunc(lr).valid() {
  1270  		// This was a bad call, but the LR is good, so use the
  1271  		// LR as sigpanic's return PC.
  1272  		return false
  1273  	}
  1274  	// Neither the PC or LR is good. Hopefully pushing a frame
  1275  	// will work.
  1276  	return true
  1277  }
  1278  
  1279  // isAbortPC reports whether pc is the program counter at which
  1280  // runtime.abort raises a signal.
  1281  //
  1282  // It is nosplit because it's part of the isgoexception
  1283  // implementation.
  1284  //
  1285  //go:nosplit
  1286  func isAbortPC(pc uintptr) bool {
  1287  	f := findfunc(pc)
  1288  	if !f.valid() {
  1289  		return false
  1290  	}
  1291  	return f.funcID == funcID_abort
  1292  }
  1293  

View as plain text