
Source file src/runtime/chan.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty,
//  except for the case of an unbuffered channel with a single goroutine
//  blocked on it for both sending and receiving using a select statement,
//  in which case the length of c.sendq and c.recvq is limited only by the
//  size of the select statement.
//
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.

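// For illustration only (not part of the runtime): the select case in the
// invariant above is the one way a single goroutine can sit on both queues
// at once. A sketch:
//
//	c := make(chan int) // unbuffered
//	select {
//	case c <- 1:
//	case <-c:
//	}
//
// Until another goroutine completes one of the two cases, this goroutine is
// parked on both c.sendq and c.recvq, so neither queue is empty.
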
import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	maxAlign  = 8
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)
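
// A note on the hchanSize expression above: -int(sz) & (maxAlign-1) is the
// number of bytes needed to round sz up to a multiple of maxAlign. Worked
// example (the concrete size is illustrative, not guaranteed): if
// unsafe.Sizeof(hchan{}) were 90, then -90 & 7 == 6 and hchanSize would be
// 90 + 6 = 96, a multiple of 8; if sz is already a multiple of 8, the
// padding term is 0.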

type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16
	closed   uint32
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}

type waitq struct {
	first *sudog
	last  *sudog
}

//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}

func makechan64(t *chantype, size int64) *hchan {
	if int64(int(size)) != size {
		panic(plainError("makechan: size out of range"))
	}

	return makechan(t, int(size))
}

func makechan(t *chantype, size int) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}

	mem, overflow := math.MulUintptr(elem.size, uintptr(size))
	if overflow || mem > maxAlloc-hchanSize || size < 0 {
		panic(plainError("makechan: size out of range"))
	}

	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
	// buf points into the same allocation, elemtype is persistent.
	// SudoG's are referenced from their owning thread so they can't be collected.
	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
	var c *hchan
	switch {
	case mem == 0:
		// Queue or element size is zero.
		c = (*hchan)(mallocgc(hchanSize, nil, true))
		// Race detector uses this location for synchronization.
		c.buf = c.raceaddr()
	case elem.ptrdata == 0:
		// Elements do not contain pointers.
		// Allocate hchan and buf in one call.
		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
		c.buf = add(unsafe.Pointer(c), hchanSize)
	default:
		// Elements contain pointers.
		c = new(hchan)
		c.buf = mallocgc(mem, elem, true)
	}

	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)
	lockInit(&c.lock, lockRankHchan)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
	}
	return c
}
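
// A rough feel for the three allocation cases above (illustrative; exact
// byte counts depend on the platform and on unsafe.Sizeof(hchan{})):
//
//	make(chan int)     // mem == 0: one hchanSize allocation; buf is raceaddr
//	make(chan int, 4)  // no pointers: one allocation of hchanSize+4*8 bytes
//	make(chan *int, 4) // pointers: hchan plus a separate GC-scanned buffer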

// chanbuf(c, i) is a pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}

// full reports whether a send on c would block (that is, the channel is full).
// It uses a single word-sized read of mutable state, so although
// the answer is instantaneously true, the correct answer may have changed
// by the time the calling function receives the return value.
func full(c *hchan) bool {
	// c.dataqsiz is immutable (never written after the channel is created)
	// so it is safe to read at any time during channel operation.
	if c.dataqsiz == 0 {
		// Assumes that a pointer read is relaxed-atomic.
		return c.recvq.first == nil
	}
	// Assumes that a uint read is relaxed-atomic.
	return c.qcount == c.dataqsiz
}
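
// The staleness that full permits is visible to user code only as a
// non-blocking send taking its default case; a sketch (illustrative):
//
//	select {
//	case c <- v:
//	default: // may run even if c stopped being full an instant later
//	}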

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, getcallerpc())
}

/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. It is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second full()).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation. However, nothing here
	// guarantees forward progress. We rely on the side effects of lock release in
	// chanrecv() and closechan() to update this thread's view of c.closed and full().
	if !block && c.closed == 0 && full(c) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
	// Ensure the value being sent is kept alive until the
	// receiver copies it out. The sudog has a pointer to the
	// stack object, but sudogs aren't considered as roots of the
	// stack tracer.
	KeepAlive(ep)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	if gp.param == nil {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil
	releaseSudog(mysg)
	return true
}

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
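
// Illustration of the direct handoff (not part of the runtime): with an
// unbuffered channel and a parked receiver, the sent value never touches
// a buffer.
//
//	c := make(chan int)
//	go func() { println(<-c) }() // receiver parks on c.recvq
//	c <- 42                      // chansend finds the waiter; send() copies
//	                             // 42 straight onto the receiver's stack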

// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	// No need for cgo write barrier checks because dst is always
	// Go memory.
	memmove(dst, src, t.size)
}

func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}
func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
		racerelease(c.raceaddr())
	}

	c.closed = 1

	var glist gList

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for !glist.empty() {
		gp := glist.pop()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
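
// The user-visible close semantics implemented above and in chanrecv, as a
// sketch (illustrative):
//
//	c := make(chan int, 1)
//	c <- 1
//	close(c)
//	v, ok := <-c // 1, true: buffered data is still delivered after close
//	v, ok = <-c  // 0, false: closed and drained, so the element is zeroed
//
// After the close, c <- 2 would panic ("send on closed channel"), as would
// a second close(c) ("close of closed channel").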

// empty reports whether a read from c would block (that is, the channel is
// empty). It uses a single atomic read of mutable state.
func empty(c *hchan) bool {
	// c.dataqsiz is immutable.
	if c.dataqsiz == 0 {
		return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
	}
	return atomic.Loaduint(&c.qcount) == 0
}

// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}

//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}
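
// For orientation, assuming the usual compiler rewrite (a sketch, not a
// spec): the two receive forms map onto the entry points above as
//
//	v = <-c     becomes chanrecv1(c, &v)
//	v, ok = <-c becomes ok = chanrecv2(c, &v)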

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	if !block && empty(c) {
		// After observing that the channel is not ready for receiving, we observe whether the
		// channel is closed.
		//
		// Reordering of these checks could lead to incorrect behavior when racing with a close.
		// For example, if the channel was open and not empty, was closed, and then drained,
		// reordered reads could incorrectly indicate "open and empty". To prevent reordering,
		// we use atomic loads for both checks, and rely on emptying and closing to happen in
		// separate critical sections under the same lock. This assumption fails when closing
		// an unbuffered channel with a blocked send, but that is an error condition anyway.
		if atomic.Load(&c.closed) == 0 {
			// Because a channel cannot be reopened, the later observation of the channel
			// being not closed implies that it was also not closed at the moment of the
			// first observation. We behave as if we observed the channel at that moment
			// and report that the receive cannot proceed.
			return
		}
		// The channel is irreversibly closed. Re-check whether the channel has any pending data
		// to receive, which could have arrived between the empty and closed checks above.
		// Sequential consistency is also required here, when racing with such a send.
		if empty(c) {
			// The channel is irreversibly closed and empty.
			if raceenabled {
				raceacquire(c.raceaddr())
			}
			if ep != nil {
				typedmemclr(c.elemtype, ep)
			}
			return true, false
		}
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(c.raceaddr())
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	closed := gp.param == nil
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, !closed
}

// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
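
// Worked example of the slot reuse in recv (illustrative): suppose
// cap(c) == 2, the buffer holds [a b] with recvx == sendx == 0, and a
// sender is parked trying to send x. The receiver takes a from slot 0,
// x is copied into that same slot, and both indices advance, leaving
// [x b] with recvx == sendx == 1. FIFO order is preserved: b is
// received before x.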

func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	gp.activeStackChans = true
	unlock((*mutex)(chanLock))
	return true
}

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, getcallerpc())
}

// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(c, elem, false)
	return
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(c, elem, false)
	return
}

//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, getcallerpc())
}

//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(c, elem, !nb)
}

//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflectlite_chanlen internal/reflectlite.chanlen
func reflectlite_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}

func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel lock. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect && !atomic.Cas(&sgp.g.selectDone, 0, 1) {
			continue
		}

		return sgp
	}
}
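
// The selectDone race above, concretely (illustrative): a goroutine
// blocked in
//
//	select {
//	case <-c1:
//	case <-c2:
//	}
//
// has a sudog on both c1.recvq and c2.recvq. If sends on c1 and c2 land
// at nearly the same time, both senders find a sudog for this goroutine;
// the CAS on selectDone lets exactly one of them claim it, and the loser
// retries with the next waiter in its queue.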

func (c *hchan) raceaddr() unsafe.Pointer {
	// Treat read-like and write-like operations on the channel as
	// happening at this address. Avoid using the address of qcount
	// or dataqsiz, because the len() and cap() builtins read
	// those addresses, and we don't want them racing with
	// operations like close().
	return unsafe.Pointer(&c.buf)
}
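
// A consequence of the address choice above (illustrative): because the
// synthetic race operations happen at &c.buf rather than at &c.qcount or
// &c.dataqsiz, len(c) and cap(c) running concurrently with close(c) are
// not flagged by the race detector.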

func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}
