Source file src/runtime/chan.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty,
//  except for the case of an unbuffered channel with a single goroutine
//  blocked on it for both sending and receiving using a select statement,
//  in which case the length of c.sendq and c.recvq is limited only by the
//  size of the select statement.
//
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.

import (
	"internal/abi"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	maxAlign  = 8
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)

type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16
	closed   uint32
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}
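
// An illustrative sketch of the index fields, for a hypothetical channel
// with dataqsiz = 4: buf behaves as a circular queue. After three sends
// and one receive:
//
//	buf:    [ _, e1, e2, _ ]  // e1, e2 pending; _ = vacant slot
//	recvx:  1                 // next receive reads buf[1]
//	sendx:  3                 // next send writes buf[3]
//	qcount: 2
//
// Both indices wrap to 0 on reaching dataqsiz, so buf[3] is followed by buf[0].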

type waitq struct {
	first *sudog
	last  *sudog
}

//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}

func makechan64(t *chantype, size int64) *hchan {
	if int64(int(size)) != size {
		panic(plainError("makechan: size out of range"))
	}

	return makechan(t, int(size))
}

func makechan(t *chantype, size int) *hchan {
	elem := t.elem

	// The compiler checks this, but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}

	mem, overflow := math.MulUintptr(elem.size, uintptr(size))
	if overflow || mem > maxAlloc-hchanSize || size < 0 {
		panic(plainError("makechan: size out of range"))
	}

	// When the elements stored in buf do not contain pointers, hchan itself
	// contains no pointers interesting to the GC:
	// buf points into the same allocation, and elemtype is persistent.
	// SudoG's are referenced from their owning thread so they can't be collected.
	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
	var c *hchan
	switch {
	case mem == 0:
		// Queue or element size is zero.
		c = (*hchan)(mallocgc(hchanSize, nil, true))
		// Race detector uses this location for synchronization.
		c.buf = c.raceaddr()
	case elem.ptrdata == 0:
		// Elements do not contain pointers.
		// Allocate hchan and buf in one call.
		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
		c.buf = add(unsafe.Pointer(c), hchanSize)
	default:
		// Elements contain pointers.
		c = new(hchan)
		c.buf = mallocgc(mem, elem, true)
	}

	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)
	lockInit(&c.lock, lockRankHchan)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
	}
	return c
}
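
// For illustration, each allocation case above is reachable from ordinary
// channel construction (assuming the usual lowering of make to makechan):
//
//	c1 := make(chan struct{}, 8) // mem == 0: hchan only; buf aliases raceaddr
//	c2 := make(chan int, 8)      // ptrdata == 0: hchan and buf in one allocation
//	c3 := make(chan *int, 8)     // default: separate buf that the GC scans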

// chanbuf(c, i) is a pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}

// full reports whether a send on c would block (that is, the channel is full).
// It uses a single word-sized read of mutable state, so although
// the answer is instantaneously true, the correct answer may have changed
// by the time the calling function receives the return value.
func full(c *hchan) bool {
	// c.dataqsiz is immutable (never written after the channel is created)
	// so it is safe to read at any time during channel operation.
	if c.dataqsiz == 0 {
		// Assumes that a pointer read is relaxed-atomic.
		return c.recvq.first == nil
	}
	// Assumes that a uint read is relaxed-atomic.
	return c.qcount == c.dataqsiz
}
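
// full is what lets a non-blocking send fail without locking. For
// illustration, the compiler lowers the select below to selectnbsend,
// which reaches the !block fast path in chansend:
//
//	select {
//	case c <- v:
//		// sent
//	default:
//		// c was observed full; chansend returned false
//		// without acquiring c.lock
//	}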

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, getcallerpc())
}

/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. It is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second full()).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation. However, nothing here
	// guarantees forward progress. We rely on the side effects of lock release in
	// chanrecv() and closechan() to update this thread's view of c.closed and full().
	if !block && c.closed == 0 && full(c) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			racenotify(c, c.sendx, nil)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	atomic.Store8(&gp.parkingOnChan, 1)
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
	// Ensure the value being sent is kept alive until the
	// receiver copies it out. The sudog has a pointer to the
	// stack object, but sudogs aren't considered roots by the
	// stack tracer.
	KeepAlive(ep)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	closed := !mysg.success
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil
	releaseSudog(mysg)
	if closed {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	return true
}
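
// A sketch of the three ways a blocking send can complete (illustrative;
// which path a given send takes depends on scheduling):
//
//	c := make(chan int, 1)
//	go func() { println(<-c) }()
//	c <- 1 // recvq non-empty: hand off directly to the parked receiver;
//	       // or qcount < dataqsiz: copy into the buffer and return;
//	       // or neither: park on c.sendq until a receiver (or close) wakes us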

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			racenotify(c, c.recvx, nil)
			racenotify(c, c.recvx, sg)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	sg.success = true
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
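
// The handoff above is the rendezvous case for unbuffered channels. For
// illustration (assuming the receiver parks first):
//
//	c := make(chan int) // unbuffered
//	go func() { println(<-c) }()
//	c <- 42 // send finds the receiver on c.recvq, and sendDirect copies 42
//	        // straight into the stack slot the receiver passed in its sudog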

// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	// No need for cgo write barrier checks because dst is always
	// Go memory.
	memmove(dst, src, t.size)
}

func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
		racerelease(c.raceaddr())
	}

	c.closed = 1

	var glist gList

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = unsafe.Pointer(sg)
		sg.success = false
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = unsafe.Pointer(sg)
		sg.success = false
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for !glist.empty() {
		gp := glist.pop()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
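
// The effects of closechan, as observed from ordinary code (illustrative):
//
//	c := make(chan int, 1)
//	c <- 1
//	close(c)
//	v, ok := <-c // v == 1, ok == true: buffered data survives close
//	v, ok = <-c  // v == 0, ok == false: closed and drained
//	// close(c) again would panic: close of closed channel
//	// c <- 2 would panic: send on closed channel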

// empty reports whether a read from c would block (that is, the channel is
// empty). It uses a single atomic read of mutable state.
func empty(c *hchan) bool {
	// c.dataqsiz is immutable.
	if c.dataqsiz == 0 {
		return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
	}
	return atomic.Loaduint(&c.qcount) == 0
}

// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}

//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	if !block && empty(c) {
		// After observing that the channel is not ready for receiving, we observe whether the
		// channel is closed.
		//
		// Reordering of these checks could lead to incorrect behavior when racing with a close.
		// For example, if the channel was open and not empty, was closed, and then drained,
		// reordered reads could incorrectly indicate "open and empty". To prevent reordering,
		// we use atomic loads for both checks, and rely on emptying and closing to happen in
		// separate critical sections under the same lock. This assumption fails when closing
		// an unbuffered channel with a blocked send, but that is an error condition anyway.
		if atomic.Load(&c.closed) == 0 {
			// Because a channel cannot be reopened, the later observation of the channel
			// being not closed implies that it was also not closed at the moment of the
			// first observation. We behave as if we observed the channel at that moment
			// and report that the receive cannot proceed.
			return
		}
		// The channel is irreversibly closed. Re-check whether the channel has any pending data
		// to receive, which could have arrived between the empty and closed checks above.
		// Sequential consistency is also required here, when racing with such a send.
		if empty(c) {
			// The channel is irreversibly closed and empty.
			if raceenabled {
				raceacquire(c.raceaddr())
			}
			if ep != nil {
				typedmemclr(c.elemtype, ep)
			}
			return true, false
		}
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(c.raceaddr())
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			racenotify(c, c.recvx, nil)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	atomic.Store8(&gp.parkingOnChan, 1)
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	success := mysg.success
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, success
}
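
// The two results map onto the compiled receive forms (illustrative):
//
//	v := <-c     // chanrecv1: the selected result is unused
//	v, ok := <-c // chanrecv2: ok is the received result, false only
//	             // once c is closed and its buffer drained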

// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			racenotify(c, c.recvx, nil)
			racenotify(c, c.recvx, sg)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	sg.success = true
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
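
// A sketch of the buffered branch of recv: FIFO order is preserved while a
// blocked sender is unblocked (illustrative, assuming the sender has parked):
//
//	c := make(chan int, 1)
//	c <- 1                 // fills the buffer
//	go func() { c <- 2 }() // parks on c.sendq: buffer is full
//	println(<-c)           // prints 1 (head of queue); 2 is copied into the
//	                       // freed slot and the sender is readied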

func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on the
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	atomic.Store8(&gp.parkingOnChan, 0)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock chanLock
	// we risk gp getting readied by a channel operation and
	// so gp could continue running before everything before
	// the unlock is visible (even to gp itself).
	unlock((*mutex)(chanLock))
	return true
}

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, getcallerpc())
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selected, ok = selectnbrecv(&v, c); selected {
//		... foo
//	} else {
//		... bar
//	}
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected, received bool) {
	return chanrecv(c, elem, false)
}

//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, getcallerpc())
}

//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(c, elem, !nb)
}

//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflectlite_chanlen internal/reflectlite.chanlen
func reflectlite_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}
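
// qcount and dataqsiz are also what the len and cap builtins report on a
// channel (illustrative):
//
//	c := make(chan int, 4)
//	c <- 1
//	println(len(c)) // 1: c.qcount
//	println(cap(c)) // 4: c.dataqsiz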

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}

func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// If a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel lock. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect && !atomic.Cas(&sgp.g.selectDone, 0, 1) {
			continue
		}

		return sgp
	}
}
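
// The selectDone CAS matters when one goroutine is queued on several
// channels at once. Illustrative scenario:
//
//	select {
//	case <-c1:
//	case <-c2:
//	}
//
// The same g sits on both c1.recvq and c2.recvq through two sudogs. If
// senders on c1 and c2 race, each dequeue finds a sudog for g, but only
// the CAS winner may complete the handoff and wake g; the loser retries
// with the next sudog in its queue.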

func (c *hchan) raceaddr() unsafe.Pointer {
	// Treat read-like and write-like operations on the channel as
	// happening at this address. Avoid using the address of qcount
	// or dataqsiz, because the len() and cap() builtins read
	// those addresses, and we don't want them racing with
	// operations like close().
	return unsafe.Pointer(&c.buf)
}

func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}

// Notify the race detector of a send or receive involving buffer entry idx
// and a channel c or its communicating partner sg.
// This function handles the special case of c.elemsize==0.
func racenotify(c *hchan, idx uint, sg *sudog) {
	// We could have passed the unsafe.Pointer corresponding to entry idx
	// instead of idx itself. However, in a future version of this function,
	// we can use idx to better handle the case of elemsize==0.
	// A future improvement to the detector is to call TSan with c and idx:
	// this way, Go can continue to not allocate buffer entries for channels
	// of elemsize==0, yet the race detector can be made to handle multiple
	// sync objects underneath the hood (one sync object per idx).
	qp := chanbuf(c, idx)
	// When elemsize==0, we don't allocate a full buffer for the channel.
	// Instead of individual buffer entries, the race detector uses c.buf
	// as the only buffer entry. This simplification prevents us from
	// following the memory model's happens-before rules (rules that are
	// implemented in racereleaseacquire). Instead, we accumulate happens-before
	// information in the synchronization object associated with c.buf.
	if c.elemsize == 0 {
		if sg == nil {
			raceacquire(qp)
			racerelease(qp)
		} else {
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
	} else {
		if sg == nil {
			racereleaseacquire(qp)
		} else {
			racereleaseacquireg(sg.g, qp)
		}
	}
}
